{-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE ScopedTypeVariables #-} {-# OPTIONS_GHC -fno-warn-name-shadowing #-} {-# OPTIONS_GHC -fno-warn-incomplete-patterns #-} module TensorFlow.GenOps.Core where import Data.ByteString (ByteString) import Data.Complex (Complex) import Data.Int (Int8, Int16, Int32, Int64) import Data.Proxy (Proxy(Proxy)) import Data.Word (Word8, Word16) import Lens.Family2 ((.~), (&)) import TensorFlow.Build import TensorFlow.BuildOp import TensorFlow.Tensor import TensorFlow.Types -- | Raise an exception to abort the process when called. If exit_without_error is true, the process will exit normally; otherwise it will exit with a SIGABRT signal. -- -- Returns nothing but an exception. abort :: forall m' . (MonadBuild m') => m' (ControlNode) abort = abort' id abort' :: forall m' . (MonadBuild m') => OpParams -> m' (ControlNode) abort' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "Abort" & op'options & opInputs .~ op'inputs) {- attr { name: "error_msg" type: "string" default_value { s: "" } description: "A string which is the message associated with the exception." } attr { name: "exit_without_error" type: "bool" default_value { b: false } } -} -- | Computes the absolute value of a tensor. -- -- Given a tensor `x`, this operation returns a tensor containing the absolute -- value of each element in `x`. For example, if x is an input element and y is -- an output element, this operation computes \\(y = |x|\\). abs :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ abs = abs' id abs' :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ abs' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Abs" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } -} -- | Applies a gradient to a given accumulator. Does not add if local_step is less -- -- than the accumulator's global_step. accumulatorApplyGradient :: forall v'2 v'3 dtype m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] dtype) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to an accumulator. -> Tensor v'2 Data.Int.Int64 -- ^ __local_step__: The local_step value at which the gradient was computed. -> Tensor v'3 dtype -- ^ __gradient__: A tensor of the gradient to be accumulated. -> m' (ControlNode) accumulatorApplyGradient = accumulatorApplyGradient' id accumulatorApplyGradient' :: forall v'2 v'3 dtype m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] dtype) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to an accumulator.
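-- Every op in this generated module comes in two forms: a plain function
-- (e.g. 'abs' above) and a primed variant (e.g. 'abs'') whose extra
-- OpParams argument (an OpDef -> OpDef endomorphism) sets attrs and other
-- options; the plain form is simply the primed form applied to 'id'. A
-- minimal usage sketch, under hedged assumptions: 'constant', 'Shape',
-- 'run', and 'runSession' come from TensorFlow.Ops and TensorFlow.Core in
-- the tensorflow package, not from this module, and 'absDemo' is an
-- illustrative name:
--
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.GenOps.Core as Ops
-- > import qualified TensorFlow.Ops as TF
-- >
-- > absDemo :: IO (V.Vector Float)
-- > absDemo = TF.runSession $
-- >   TF.run (Ops.abs (TF.constant (TF.Shape [3]) [-1, 2, -3 :: Float]))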
-> Tensor v'2 Data.Int.Int64 -- ^ __local_step__: The local_step value at which the gradient was computed. -> Tensor v'3 dtype -- ^ __gradient__: A tensor of the gradient to be accumulated. -> m' (ControlNode) accumulatorApplyGradient' op'options handle local_step gradient | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs local_step, buildInputs gradient] buildOp [] (opDef "AccumulatorApplyGradient" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to an accumulator." type: DT_STRING is_ref: true } input_arg { name: "local_step" description: "The local_step value at which the gradient was computed." type: DT_INT64 } input_arg { name: "gradient" description: "A tensor of the gradient to be accumulated." type_attr: "dtype" } attr { name: "dtype" type: "type" description: "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator." allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } -} -- | Returns the number of gradients aggregated in the given accumulators. accumulatorNumAccumulated :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to an accumulator. -> m' (Tensor Value Data.Int.Int32) -- ^ __num_accumulated__: The number of gradients aggregated in the given accumulator. accumulatorNumAccumulated = accumulatorNumAccumulated' id accumulatorNumAccumulated' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to an accumulator. -> m' (Tensor Value Data.Int.Int32) -- ^ __num_accumulated__: The number of gradients aggregated in the given accumulator. accumulatorNumAccumulated' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "AccumulatorNumAccumulated" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to an accumulator." type: DT_STRING is_ref: true } output_arg { name: "num_accumulated" description: "The number of gradients aggregated in the given accumulator." type: DT_INT32 } -} -- | Updates the accumulator with a new value for global_step. Logs a warning if the -- -- accumulator's value is already higher than new_global_step. accumulatorSetGlobalStep :: forall v'2 m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to an accumulator. -> Tensor v'2 Data.Int.Int64 -- ^ __new_global_step__: The new global_step value to set. -> m' (ControlNode) accumulatorSetGlobalStep = accumulatorSetGlobalStep' id accumulatorSetGlobalStep' :: forall v'2 m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to an accumulator. -> Tensor v'2 Data.Int.Int64 -- ^ __new_global_step__: The new global_step value to set. -> m' (ControlNode) accumulatorSetGlobalStep' op'options handle new_global_step | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs new_global_step] buildOp [] (opDef "AccumulatorSetGlobalStep" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to an accumulator."
type: DT_STRING is_ref: true } input_arg { name: "new_global_step" description: "The new global_step value to set." type: DT_INT64 } -} -- | Extracts the average gradient in the given ConditionalAccumulator, provided -- -- that sufficient (i.e., more than num_required) gradients have been accumulated. -- The op blocks until sufficient gradients have been accumulated. -- If the accumulator has already aggregated more than num_required gradients, it -- returns the average of the accumulated gradients. -- Also automatically increments the recorded global_step in the accumulator by 1, -- and resets the aggregate to 0. accumulatorTakeGradient :: forall v'2 dtype m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] dtype) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to an accumulator. -> Tensor v'2 Data.Int.Int32 -- ^ __num_required__: Number of gradients required before we return an aggregate. -> m' (Tensor Value dtype) -- ^ __average__: The average of the accumulated gradients. accumulatorTakeGradient = accumulatorTakeGradient' id accumulatorTakeGradient' :: forall v'2 dtype m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] dtype) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to an accumulator. -> Tensor v'2 Data.Int.Int32 -- ^ __num_required__: Number of gradients required before we return an aggregate. -> m' (Tensor Value dtype) -- ^ __average__: The average of the accumulated gradients. accumulatorTakeGradient' op'options handle num_required | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs num_required] buildOp [] (opDef "AccumulatorTakeGradient" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to an accumulator." type: DT_STRING is_ref: true } input_arg { name: "num_required" description: "Number of gradients required before we return an aggregate." type: DT_INT32 } output_arg { name: "average" description: "The average of the accumulated gradients." type_attr: "dtype" } attr { name: "dtype" type: "type" description: "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator." allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } -} -- | Computes acos of x element-wise. acos :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ acos = acos' id acos' :: forall v'1 t . 
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ acos' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Acos" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Returns x + y element-wise. -- -- *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) add :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ add = add' id add' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ add' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "Add" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_STRING } } } -} -- | Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles. -- -- A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`, -- `sparse_values`, and `sparse_shape`, where -- -- ```sparse_indices.shape[1] == sparse_shape.shape[0] == R``` -- -- An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor` -- having a first `sparse_indices` column taking values between `[0, N)`, where -- the minibatch size `N == sparse_shape[0]`. -- -- The input `SparseTensor` must have rank `R` greater than 1, and the first -- dimension is treated as the minibatch dimension. Elements of the `SparseTensor` -- must be sorted in increasing order of this first dimension. The stored -- `SparseTensor` objects pointed to by each row of the output `sparse_handles` -- will have rank `R-1`. -- -- The `SparseTensor` values can then be read out as part of a minibatch by passing -- the given keys as vector elements to `TakeManySparseFromTensorsMap`. To ensure -- the correct `SparseTensorsMap` is accessed, ensure that the same -- `container` and `shared_name` are passed to that Op. If no `shared_name` -- is provided here, instead use the *name* of the Operation created by calling -- `AddManySparseToTensorsMap` as the `shared_name` passed to -- `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated. 
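--
-- A sketch of setting the shared map name through the primed variant's
-- OpParams ('opAttr' and '.~' are already imported by this module; the
-- attr value and the input names are illustrative, and string attrs are
-- assumed to be passed as ByteString):
--
-- > sparseHandles <- addManySparseToTensorsMap'
-- >                    (opAttr "shared_name" .~ ("my_sparse_map" :: ByteString))
-- >                    sparseIndices sparseValues sparseShape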
addManySparseToTensorsMap :: forall v'1 v'2 v'3 t m' . (MonadBuild m', TensorType t) => Tensor v'1 Data.Int.Int64 -- ^ __sparse_indices__: 2-D. The `indices` of the minibatch `SparseTensor`. -- `sparse_indices[:, 0]` must be ordered values in `[0, N)`. -> Tensor v'2 t -- ^ __sparse_values__: 1-D. The `values` of the minibatch `SparseTensor`. -> Tensor v'3 Data.Int.Int64 -- ^ __sparse_shape__: 1-D. The `shape` of the minibatch `SparseTensor`. -- The minibatch size `N == sparse_shape[0]`. -> m' (Tensor Value Data.Int.Int64) -- ^ __sparse_handles__: 1-D. The handles of the `SparseTensor` now stored in the -- `SparseTensorsMap`. Shape: `[N]`. addManySparseToTensorsMap = addManySparseToTensorsMap' id addManySparseToTensorsMap' :: forall v'1 v'2 v'3 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __sparse_indices__: 2-D. The `indices` of the minibatch `SparseTensor`. -- `sparse_indices[:, 0]` must be ordered values in `[0, N)`. -> Tensor v'2 t -- ^ __sparse_values__: 1-D. The `values` of the minibatch `SparseTensor`. -> Tensor v'3 Data.Int.Int64 -- ^ __sparse_shape__: 1-D. The `shape` of the minibatch `SparseTensor`. -- The minibatch size `N == sparse_shape[0]`. -> m' (Tensor Value Data.Int.Int64) -- ^ __sparse_handles__: 1-D. The handles of the `SparseTensor` now stored in the -- `SparseTensorsMap`. Shape: `[N]`. addManySparseToTensorsMap' op'options sparse_indices sparse_values sparse_shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sparse_indices, buildInputs sparse_values, buildInputs sparse_shape] buildOp [] (opDef "AddManySparseToTensorsMap" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "sparse_indices" description: "2-D. The `indices` of the minibatch `SparseTensor`.\n`sparse_indices[:, 0]` must be ordered values in `[0, N)`." type: DT_INT64 } input_arg { name: "sparse_values" description: "1-D. The `values` of the minibatch `SparseTensor`." type_attr: "T" } input_arg { name: "sparse_shape" description: "1-D. The `shape` of the minibatch `SparseTensor`.\nThe minibatch size `N == sparse_shape[0]`." type: DT_INT64 } output_arg { name: "sparse_handles" description: "1-D. The handles of the `SparseTensor` now stored in the\n`SparseTensorsMap`. Shape: `[N]`." type: DT_INT64 } attr { name: "T" type: "type" } attr { name: "container" type: "string" default_value { s: "" } description: "The container name for the `SparseTensorsMap` created by this op." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "The shared name for the `SparseTensorsMap` created by this op.\nIf blank, the new Operation\'s unique name is used." } -} -- | Add all input tensors element-wise. addN :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => [Tensor v'1 t] -- ^ __inputs__: Must all be the same size and shape. -> Tensor Build t -- ^ __sum__ addN = addN' id addN' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> [Tensor v'1 t] -- ^ __inputs__: Must all be the same size and shape.
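-- (Note: the "N" attr in the op definition below is not a positional
-- argument; addN'/addN derive it from the length of this input list and
-- check consistency with eqLengthGuard. A sketch, where a, b, and c are
-- assumed to be same-shaped Tensor Build Float values built elsewhere:)
--
-- > total = addN [a, b, c]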
-> Tensor Build t -- ^ __sum__ addN' op'options inputs | eqLengthGuard [("N", [("inputs", length inputs)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs] return (opDef "AddN" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length inputs) :: Int64 {- input_arg { name: "inputs" description: "Must all be the same size and shape." type_attr: "T" number_attr: "N" } output_arg { name: "sum" type_attr: "T" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } -} -- | Add a `SparseTensor` to a `SparseTensorsMap`, returning its handle. -- -- A `SparseTensor` is represented by three tensors: `sparse_indices`, -- `sparse_values`, and `sparse_shape`. -- -- This operator takes the given `SparseTensor` and adds it to a container -- object (a `SparseTensorsMap`). A unique key within this container is generated -- in the form of an `int64`, and this is the value that is returned. -- -- The `SparseTensor` can then be read out as part of a minibatch by passing -- the key as a vector element to `TakeManySparseFromTensorsMap`. To ensure -- the correct `SparseTensorsMap` is accessed, ensure that the same -- `container` and `shared_name` are passed to that Op. If no `shared_name` -- is provided here, instead use the *name* of the Operation created by calling -- `AddSparseToTensorsMap` as the `shared_name` passed to -- `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated. addSparseToTensorsMap :: forall v'1 v'2 v'3 t m' . (MonadBuild m', TensorType t) => Tensor v'1 Data.Int.Int64 -- ^ __sparse_indices__: 2-D. The `indices` of the `SparseTensor`. -> Tensor v'2 t -- ^ __sparse_values__: 1-D. The `values` of the `SparseTensor`. -> Tensor v'3 Data.Int.Int64 -- ^ __sparse_shape__: 1-D. The `shape` of the `SparseTensor`. -> m' (Tensor Value Data.Int.Int64) -- ^ __sparse_handle__: 0-D. The handle of the `SparseTensor` now stored in the -- `SparseTensorsMap`. addSparseToTensorsMap = addSparseToTensorsMap' id addSparseToTensorsMap' :: forall v'1 v'2 v'3 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __sparse_indices__: 2-D. The `indices` of the `SparseTensor`. -> Tensor v'2 t -- ^ __sparse_values__: 1-D. The `values` of the `SparseTensor`. -> Tensor v'3 Data.Int.Int64 -- ^ __sparse_shape__: 1-D. The `shape` of the `SparseTensor`. -> m' (Tensor Value Data.Int.Int64) -- ^ __sparse_handle__: 0-D. The handle of the `SparseTensor` now stored in the -- `SparseTensorsMap`. addSparseToTensorsMap' op'options sparse_indices sparse_values sparse_shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sparse_indices, buildInputs sparse_values, buildInputs sparse_shape] buildOp [] (opDef "AddSparseToTensorsMap" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "sparse_indices" description: "2-D. The `indices` of the `SparseTensor`." type: DT_INT64 } input_arg { name: "sparse_values" description: "1-D. The `values` of the `SparseTensor`." type_attr: "T" } input_arg { name: "sparse_shape" description: "1-D. The `shape` of the `SparseTensor`."
type: DT_INT64 } output_arg { name: "sparse_handle" description: "0-D. The handle of the `SparseTensor` now stored in the\n`SparseTensorsMap`." type: DT_INT64 } attr { name: "T" type: "type" } attr { name: "container" type: "string" default_value { s: "" } description: "The container name for the `SparseTensorsMap` created by this op." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "The shared name for the `SparseTensorsMap` created by this op.\nIf blank, the new Operation\'s unique name is used." } -} -- | Deprecated. Disallowed in GraphDef version >= 2. adjustContrast :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __images__ -> Tensor v'2 Float -- ^ __contrast_factor__ -> Tensor v'3 Float -- ^ __min_value__ -> Tensor v'4 Float -- ^ __max_value__ -> Tensor Build Float -- ^ __output__ adjustContrast = adjustContrast' id adjustContrast' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __images__ -> Tensor v'2 Float -- ^ __contrast_factor__ -> Tensor v'3 Float -- ^ __min_value__ -> Tensor v'4 Float -- ^ __max_value__ -> Tensor Build Float -- ^ __output__ adjustContrast' op'options images contrast_factor min_value max_value | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images, buildInputs contrast_factor, buildInputs min_value, buildInputs max_value] return (opDef "AdjustContrast" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "images" type_attr: "T" } input_arg { name: "contrast_factor" type: DT_FLOAT } input_arg { name: "min_value" type: DT_FLOAT } input_arg { name: "max_value" type: DT_FLOAT } output_arg { name: "output" type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Adjust the contrast of one or more images. -- -- `images` is a tensor of at least 3 dimensions. The last 3 dimensions are -- interpreted as `[height, width, channels]`. The other dimensions only -- represent a collection of images, such as `[batch, height, width, channels].` -- -- Contrast is adjusted independently for each channel of each image. -- -- For each channel, the Op first computes the mean of the image pixels in the -- channel and then adjusts each component of each pixel to -- `(x - mean) * contrast_factor + mean`. adjustContrastv2 :: Tensor v'1 Float -- ^ __images__: Images to adjust. At least 3-D. -> Tensor v'2 Float -- ^ __contrast_factor__: A float multiplier for adjusting contrast. -> Tensor Build Float -- ^ __output__: The contrast-adjusted image or images. adjustContrastv2 = adjustContrastv2' id adjustContrastv2' :: OpParams -> Tensor v'1 Float -- ^ __images__: Images to adjust. At least 3-D. -> Tensor v'2 Float -- ^ __contrast_factor__: A float multiplier for adjusting contrast. -> Tensor Build Float -- ^ __output__: The contrast-adjusted image or images. adjustContrastv2' op'options images contrast_factor | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images, buildInputs contrast_factor] return (opDef "AdjustContrastv2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "images" description: "Images to adjust. At least 3-D." 
type: DT_FLOAT } input_arg { name: "contrast_factor" description: "A float multiplier for adjusting contrast." type: DT_FLOAT } output_arg { name: "output" description: "The contrast-adjusted image or images." type: DT_FLOAT } -} -- | Adjust the hue of one or more images. -- -- `images` is a tensor of at least 3 dimensions. The last dimension is -- interpreted as channels, and must be three. -- -- The input image is considered in the RGB colorspace. Conceptually, the RGB -- colors are first mapped into HSV. A delta is then applied to all the hue values, -- and then remapped back to RGB colorspace. adjustHue :: Tensor v'1 Float -- ^ __images__: Images to adjust. At least 3-D. -> Tensor v'2 Float -- ^ __delta__: A float delta to add to the hue. -> Tensor Build Float -- ^ __output__: The hue-adjusted image or images. adjustHue = adjustHue' id adjustHue' :: OpParams -> Tensor v'1 Float -- ^ __images__: Images to adjust. At least 3-D. -> Tensor v'2 Float -- ^ __delta__: A float delta to add to the hue. -> Tensor Build Float -- ^ __output__: The hue-adjusted image or images. adjustHue' op'options images delta | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images, buildInputs delta] return (opDef "AdjustHue" & op'options & opInputs .~ op'inputs) {- input_arg { name: "images" description: "Images to adjust. At least 3-D." type: DT_FLOAT } input_arg { name: "delta" description: "A float delta to add to the hue." type: DT_FLOAT } output_arg { name: "output" description: "The hue-adjusted image or images." type: DT_FLOAT } -} -- | Adjust the saturation of one or more images. -- -- `images` is a tensor of at least 3 dimensions. The last dimension is -- interpreted as channels, and must be three. -- -- The input image is considered in the RGB colorspace. Conceptually, the RGB -- colors are first mapped into HSV. A scale is then applied to all the saturation -- values, and then remapped back to RGB colorspace. adjustSaturation :: Tensor v'1 Float -- ^ __images__: Images to adjust. At least 3-D. -> Tensor v'2 Float -- ^ __scale__: A float scale to add to the saturation. -> Tensor Build Float -- ^ __output__: The saturation-adjusted image or images. adjustSaturation = adjustSaturation' id adjustSaturation' :: OpParams -> Tensor v'1 Float -- ^ __images__: Images to adjust. At least 3-D. -> Tensor v'2 Float -- ^ __scale__: A float scale to add to the saturation. -> Tensor Build Float -- ^ __output__: The saturation-adjusted image or images. adjustSaturation' op'options images scale | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images, buildInputs scale] return (opDef "AdjustSaturation" & op'options & opInputs .~ op'inputs) {- input_arg { name: "images" description: "Images to adjust. At least 3-D." type: DT_FLOAT } input_arg { name: "scale" description: "A float scale to add to the saturation." type: DT_FLOAT } output_arg { name: "output" description: "The saturation-adjusted image or images." type: DT_FLOAT } -} -- | Computes the "logical and" of elements across dimensions of a tensor. -- -- Reduces `input` along the dimensions given in `reduction_indices`. Unless -- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in -- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are -- retained with length 1. all :: forall v'1 v'2 tidx . (OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 Bool -- ^ __input__: The tensor to reduce.
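-- (The per-channel contrast rule quoted above for AdjustContrastv2,
-- (x - mean) * contrast_factor + mean, as a plain-Haskell sketch over one
-- channel's pixel values; illustrative only, not part of the generated
-- API:)
--
-- > adjustChannel :: Float -> [Float] -> [Float]
-- > adjustChannel contrastFactor xs = map (\x -> (x - mean) * contrastFactor + mean) xs
-- >   where mean = sum xs / fromIntegral (length xs)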
-> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce. -> Tensor Build Bool -- ^ __output__: The reduced tensor. all = all' id all' :: forall v'1 v'2 tidx . (OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 Bool -- ^ __input__: The tensor to reduce. -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce. -> Tensor Build Bool -- ^ __output__: The reduced tensor. all' op'options input reduction_indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs reduction_indices] return (opDef "All" & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "The tensor to reduce." type: DT_BOOL } input_arg { name: "reduction_indices" description: "The dimensions to reduce." type_attr: "Tidx" } output_arg { name: "output" description: "The reduced tensor." type: DT_BOOL } attr { name: "keep_dims" type: "bool" default_value { b: false } description: "If true, retain reduced dimensions with length 1." } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Generates labels for candidate sampling with a learned unigram distribution. -- -- See explanations of candidate sampling and the data formats at -- go/candidate-sampling. -- -- For each batch, this op picks a single set of sampled candidate labels. -- -- The advantages of sampling candidates per-batch are simplicity and the -- possibility of efficient dense matrix multiplication. The disadvantage is that -- the sampled candidates must be chosen independently of the context and of the -- true labels. allCandidateSampler :: Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to produce per batch. -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context. -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled -- candidates in a batch are unique. This requires some approximation to -- estimate the post-rejection sampling probabilities. -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the -- IDs of the num_true target_classes in the corresponding original label. -> (Tensor Build Data.Int.Int64, Tensor Build Float, Tensor Build Float) -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__) -- -- * __sampled_candidates__: A vector of length num_sampled, in which each element is -- the ID of a sampled candidate. -- -- * __true_expected_count__: A batch_size * num_true matrix, representing -- the number of times each candidate is expected to occur in a batch -- of sampled candidates. If unique=true, then this is a probability. -- -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled -- candidate representing the number of times the candidate is expected -- to occur in a batch of sampled candidates. If unique=true, then this is a -- probability. allCandidateSampler = allCandidateSampler' id allCandidateSampler' :: OpParams -> Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to produce per batch. -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context. -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled -- candidates in a batch are unique. This requires some approximation to -- estimate the post-rejection sampling probabilities. 
-> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the -- IDs of the num_true target_classes in the corresponding original label. -> (Tensor Build Data.Int.Int64, Tensor Build Float, Tensor Build Float) -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__) -- -- * __sampled_candidates__: A vector of length num_sampled, in which each element is -- the ID of a sampled candidate. -- -- * __true_expected_count__: A batch_size * num_true matrix, representing -- the number of times each candidate is expected to occur in a batch -- of sampled candidates. If unique=true, then this is a probability. -- -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled -- candidate representing the number of times the candidate is expected -- to occur in a batch of sampled candidates. If unique=true, then this is a -- probability. allCandidateSampler' op'options num_sampled num_true unique true_classes | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs true_classes] return (opDef "AllCandidateSampler" & opAttr "num_sampled" .~ num_sampled & opAttr "num_true" .~ num_true & opAttr "unique" .~ unique & op'options & opInputs .~ op'inputs) {- input_arg { name: "true_classes" description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label." type: DT_INT64 } output_arg { name: "sampled_candidates" description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate." type: DT_INT64 } output_arg { name: "true_expected_count" description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability." type: DT_FLOAT } output_arg { name: "sampled_expected_count" description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability." type: DT_FLOAT } attr { name: "num_true" type: "int" description: "Number of true labels per context." has_minimum: true minimum: 1 } attr { name: "num_sampled" type: "int" description: "Number of candidates to produce per batch." has_minimum: true minimum: 1 } attr { name: "unique" type: "bool" description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities." } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either seed or seed2 is set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } -} -- | Computes the "logical or" of elements across dimensions of a tensor. -- -- Reduces `input` along the dimensions given in `reduction_indices`. Unless -- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in -- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are -- retained with length 1. any :: forall v'1 v'2 tidx . (OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 Bool -- ^ __input__: The tensor to reduce.
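-- (The keep_dims attr in the All/Any op definitions on this page is not a
-- positional argument; it is set through the primed variants' OpParams,
-- reusing the opAttr lens this module already uses for other attrs. A
-- sketch with illustrative input names:)
--
-- > reduced = any' (opAttr "keep_dims" .~ True) input reductionIndices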
-> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce. -> Tensor Build Bool -- ^ __output__: The reduced tensor. any = any' id any' :: forall v'1 v'2 tidx . (OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 Bool -- ^ __input__: The tensor to reduce. -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce. -> Tensor Build Bool -- ^ __output__: The reduced tensor. any' op'options input reduction_indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs reduction_indices] return (opDef "Any" & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "The tensor to reduce." type: DT_BOOL } input_arg { name: "reduction_indices" description: "The dimensions to reduce." type_attr: "Tidx" } output_arg { name: "output" description: "The reduced tensor." type: DT_BOOL } attr { name: "keep_dims" type: "bool" default_value { b: false } description: "If true, retain reduced dimensions with length 1." } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Update '*var' according to the adadelta scheme. -- -- accum = rho() * accum + (1 - rho()) * grad.square(); -- update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; -- update_accum = rho() * update_accum + (1 - rho()) * update.square(); -- var -= update; applyAdadelta :: forall v'4 v'5 v'6 v'7 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __accum__: Should be from a Variable(). -> Tensor Ref t -- ^ __accum_update__: Should be from a Variable(). -> Tensor v'4 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'5 t -- ^ __rho__: Decay factor. Must be a scalar. -> Tensor v'6 t -- ^ __epsilon__: Constant factor. Must be a scalar. -> Tensor v'7 t -- ^ __grad__: The gradient. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". applyAdadelta = applyAdadelta' id applyAdadelta' :: forall v'4 v'5 v'6 v'7 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __accum__: Should be from a Variable(). -> Tensor Ref t -- ^ __accum_update__: Should be from a Variable(). -> Tensor v'4 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'5 t -- ^ __rho__: Decay factor. Must be a scalar. -> Tensor v'6 t -- ^ __epsilon__: Constant factor. Must be a scalar. -> Tensor v'7 t -- ^ __grad__: The gradient. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". applyAdadelta' op'options var accum accum_update lr rho epsilon grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs accum_update, buildInputs lr, buildInputs rho, buildInputs epsilon, buildInputs grad] buildOp [] (opDef "ApplyAdadelta" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." 
type_attr: "T" is_ref: true } input_arg { name: "accum" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "accum_update" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "rho" description: "Decay factor. Must be a scalar." type_attr: "T" } input_arg { name: "epsilon" description: "Constant factor. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, updating of the var, accum and update_accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention." } -} -- | Update '*var' according to the adagrad scheme. -- -- accum += grad * grad -- var -= lr * grad * (1 / sqrt(accum)) applyAdagrad :: forall v'3 v'4 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'4 t -- ^ __grad__: The gradient. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". applyAdagrad = applyAdagrad' id applyAdagrad' :: forall v'3 v'4 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'4 t -- ^ __grad__: The gradient. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". applyAdagrad' op'options var accum lr grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs lr, buildInputs grad] buildOp [] (opDef "ApplyAdagrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "accum" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } output_arg { name: "out" description: "Same as \"var\"." 
type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } -} -- | Update '*var' according to the proximal adagrad scheme. applyAdagradDA :: forall v'4 v'5 v'6 v'7 v'8 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __gradient_accumulator__: Should be from a Variable(). -> Tensor Ref t -- ^ __gradient_squared_accumulator__: Should be from a Variable(). -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'6 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'7 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'8 Data.Int.Int64 -- ^ __global_step__: Training step number. Must be a scalar. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". applyAdagradDA = applyAdagradDA' id applyAdagradDA' :: forall v'4 v'5 v'6 v'7 v'8 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __gradient_accumulator__: Should be from a Variable(). -> Tensor Ref t -- ^ __gradient_squared_accumulator__: Should be from a Variable(). -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'6 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'7 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'8 Data.Int.Int64 -- ^ __global_step__: Training step number. Must be a scalar. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". applyAdagradDA' op'options var gradient_accumulator gradient_squared_accumulator grad lr l1 l2 global_step | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs gradient_accumulator, buildInputs gradient_squared_accumulator, buildInputs grad, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs global_step] buildOp [] (opDef "ApplyAdagradDA" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "gradient_accumulator" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "gradient_squared_accumulator" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." 
type_attr: "T" } input_arg { name: "global_step" description: "Training step number. Must be a scalar." type: DT_INT64 } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention." } -} -- | Update '*var' according to the Adam algorithm. -- -- lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) -- m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t -- v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t -- variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon) applyAdam :: forall v'4 v'5 v'6 v'7 v'8 v'9 v'10 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __m__: Should be from a Variable(). -> Tensor Ref t -- ^ __v__: Should be from a Variable(). -> Tensor v'4 t -- ^ __beta1_power__: Must be a scalar. -> Tensor v'5 t -- ^ __beta2_power__: Must be a scalar. -> Tensor v'6 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'7 t -- ^ __beta1__: Momentum factor. Must be a scalar. -> Tensor v'8 t -- ^ __beta2__: Momentum factor. Must be a scalar. -> Tensor v'9 t -- ^ __epsilon__: Ridge term. Must be a scalar. -> Tensor v'10 t -- ^ __grad__: The gradient. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". applyAdam = applyAdam' id applyAdam' :: forall v'4 v'5 v'6 v'7 v'8 v'9 v'10 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __m__: Should be from a Variable(). -> Tensor Ref t -- ^ __v__: Should be from a Variable(). -> Tensor v'4 t -- ^ __beta1_power__: Must be a scalar. -> Tensor v'5 t -- ^ __beta2_power__: Must be a scalar. -> Tensor v'6 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'7 t -- ^ __beta1__: Momentum factor. Must be a scalar. -> Tensor v'8 t -- ^ __beta2__: Momentum factor. Must be a scalar. -> Tensor v'9 t -- ^ __epsilon__: Ridge term. Must be a scalar. -> Tensor v'10 t -- ^ __grad__: The gradient. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". applyAdam' op'options var m v beta1_power beta2_power lr beta1 beta2 epsilon grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs m, buildInputs v, buildInputs beta1_power, buildInputs beta2_power, buildInputs lr, buildInputs beta1, buildInputs beta2, buildInputs epsilon, buildInputs grad] buildOp [] (opDef "ApplyAdam" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "m" description: "Should be from a Variable()." 
type_attr: "T" is_ref: true } input_arg { name: "v" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "beta1_power" description: "Must be a scalar." type_attr: "T" } input_arg { name: "beta2_power" description: "Must be a scalar." type_attr: "T" } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "beta1" description: "Momentum factor. Must be a scalar." type_attr: "T" } input_arg { name: "beta2" description: "Momentum factor. Must be a scalar." type_attr: "T" } input_arg { name: "epsilon" description: "Ridge term. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var, m, and v tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } -} -- | Update '*var' according to the centered RMSProp algorithm. -- -- The centered RMSProp algorithm uses an estimate of the centered second moment -- (i.e., the variance) for normalization, as opposed to regular RMSProp, which -- uses the (uncentered) second moment. This often helps with training, but is -- slightly more expensive in terms of computation and memory. -- -- Note that in dense implementation of this algorithm, mg, ms, and mom will -- update even if the grad is zero, but in this sparse implementation, mg, ms, -- and mom will not update in iterations during which the grad is zero. -- -- mean_square = decay * mean_square + (1-decay) * gradient ** 2 -- mean_grad = decay * mean_grad + (1-decay) * gradient -- -- Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) -- -- mg <- rho * mg_{t-1} + (1-rho) * grad -- ms <- rho * ms_{t-1} + (1-rho) * grad * grad -- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) -- var <- var - mom applyCenteredRMSProp :: forall v'5 v'6 v'7 v'8 v'9 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __mg__: Should be from a Variable(). -> Tensor Ref t -- ^ __ms__: Should be from a Variable(). -> Tensor Ref t -- ^ __mom__: Should be from a Variable(). -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'6 t -- ^ __rho__: Decay rate. Must be a scalar. -> Tensor v'7 t -- ^ __momentum__ -> Tensor v'8 t -- ^ __epsilon__: Ridge term. Must be a scalar. -> Tensor v'9 t -- ^ __grad__: The gradient. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". applyCenteredRMSProp = applyCenteredRMSProp' id applyCenteredRMSProp' :: forall v'5 v'6 v'7 v'8 v'9 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor Ref t -- ^ __var__: Should be from a Variable(). 
-> Tensor Ref t -- ^ __mg__: Should be from a Variable(). -> Tensor Ref t -- ^ __ms__: Should be from a Variable(). -> Tensor Ref t -- ^ __mom__: Should be from a Variable(). -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'6 t -- ^ __rho__: Decay rate. Must be a scalar. -> Tensor v'7 t -- ^ __momentum__ -> Tensor v'8 t -- ^ __epsilon__: Ridge term. Must be a scalar. -> Tensor v'9 t -- ^ __grad__: The gradient. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". applyCenteredRMSProp' op'options var mg ms mom lr rho momentum epsilon grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs mg, buildInputs ms, buildInputs mom, buildInputs lr, buildInputs rho, buildInputs momentum, buildInputs epsilon, buildInputs grad] buildOp [] (opDef "ApplyCenteredRMSProp" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "mg" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "ms" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "mom" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "rho" description: "Decay rate. Must be a scalar." type_attr: "T" } input_arg { name: "momentum" type_attr: "T" } input_arg { name: "epsilon" description: "Ridge term. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } -} -- | Update '*var' according to the Ftrl-proximal scheme. -- -- accum_new = accum + grad * grad -- linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var -- quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 -- var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 -- accum = accum_new applyFtrl :: forall v'4 v'5 v'6 v'7 v'8 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __accum__: Should be from a Variable(). -> Tensor Ref t -- ^ __linear__: Should be from a Variable(). -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'6 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'7 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'8 t -- ^ __lr_power__: Scaling factor. Must be a scalar. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". applyFtrl = applyFtrl' id applyFtrl' :: forall v'4 v'5 v'6 v'7 v'8 t m' .
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __accum__: Should be from a Variable(). -> Tensor Ref t -- ^ __linear__: Should be from a Variable(). -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'6 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'7 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'8 t -- ^ __lr_power__: Scaling factor. Must be a scalar. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". applyFtrl' op'options var accum linear grad lr l1 l2 lr_power | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs linear, buildInputs grad, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs lr_power] buildOp [] (opDef "ApplyFtrl" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "accum" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "linear" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "lr_power" description: "Scaling factor. Must be a scalar." type_attr: "T" } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } -} -- | Update '*var' by subtracting 'alpha' * 'delta' from it. applyGradientDescent :: forall v'2 v'3 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar. -> Tensor v'3 t -- ^ __delta__: The change. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". applyGradientDescent = applyGradientDescent' id applyGradientDescent' :: forall v'2 v'3 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar. -> Tensor v'3 t -- ^ __delta__: The change. -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
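-- A usage sketch for the signature above (names are illustrative; 'var'
-- would come from a Variable created elsewhere, e.g. via TensorFlow.Ops,
-- and the use_locking attr listed below is reachable through OpParams):
--
-- > updated <- applyGradientDescent' (opAttr "use_locking" .~ True) var alpha delta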
applyGradientDescent' op'options var alpha delta | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs alpha, buildInputs delta] buildOp [] (opDef "ApplyGradientDescent" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "alpha" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "delta" description: "The change." type_attr: "T" } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention." } -} -- | Update '*var' according to the momentum scheme. Set use_nesterov = True if you -- -- want to use Nesterov momentum. -- -- accum = accum * momentum + grad -- var -= lr * accum applyMomentum :: forall v'3 v'4 v'5 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 t -- ^ __momentum__: Momentum. Must be a scalar. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". applyMomentum = applyMomentum' id applyMomentum' :: forall v'3 v'4 v'5 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 t -- ^ __momentum__: Momentum. Must be a scalar. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". applyMomentum' op'options var accum lr grad momentum | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs lr, buildInputs grad, buildInputs momentum] buildOp [] (opDef "ApplyMomentum" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "accum" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "momentum" description: "Momentum. Must be a scalar." type_attr: "T" } output_arg { name: "out" description: "Same as \"var\"." 
type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } attr { name: "use_nesterov" type: "bool" default_value { b: false } description: "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum." } -} -- | Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. -- -- accum += grad * grad -- prox_v = var - lr * grad * (1 / sqrt(accum)) -- var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} applyProximalAdagrad :: forall v'3 v'4 v'5 v'6 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'4 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'5 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'6 t -- ^ __grad__: The gradient. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". applyProximalAdagrad = applyProximalAdagrad' id applyProximalAdagrad' :: forall v'3 v'4 v'5 v'6 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'4 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'5 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'6 t -- ^ __grad__: The gradient. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". applyProximalAdagrad' op'options var accum lr l1 l2 grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs grad] buildOp [] (opDef "ApplyProximalAdagrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "accum" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } output_arg { name: "out" description: "Same as \"var\"." 
type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention." } -} -- | Update '*var' as FOBOS algorithm with fixed learning rate. -- -- prox_v = var - alpha * delta -- var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0} applyProximalGradientDescent :: forall v'2 v'3 v'4 v'5 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar. -> Tensor v'3 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'4 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'5 t -- ^ __delta__: The change. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". applyProximalGradientDescent = applyProximalGradientDescent' id applyProximalGradientDescent' :: forall v'2 v'3 v'4 v'5 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar. -> Tensor v'3 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'4 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'5 t -- ^ __delta__: The change. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". applyProximalGradientDescent' op'options var alpha l1 l2 delta | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs alpha, buildInputs l1, buildInputs l2, buildInputs delta] buildOp [] (opDef "ApplyProximalGradientDescent" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "alpha" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "delta" description: "The change." type_attr: "T" } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention." } -} -- | Update '*var' according to the RMSProp algorithm. 
-- -- Note that in the dense implementation of this algorithm, ms and mom will -- update even if the grad is zero (the sparse variant, by contrast, does not -- update ms and mom in iterations during which the grad is zero). -- -- mean_square = decay * mean_square + (1-decay) * gradient ** 2 -- Delta = learning_rate * gradient / sqrt(mean_square + epsilon) -- -- ms <- rho * ms_{t-1} + (1-rho) * grad * grad -- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) -- var <- var - mom applyRMSProp :: forall v'4 v'5 v'6 v'7 v'8 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __ms__: Should be from a Variable(). -> Tensor Ref t -- ^ __mom__: Should be from a Variable(). -> Tensor v'4 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'5 t -- ^ __rho__: Decay rate. Must be a scalar. -> Tensor v'6 t -- ^ __momentum__ -> Tensor v'7 t -- ^ __epsilon__: Ridge term. Must be a scalar. -> Tensor v'8 t -- ^ __grad__: The gradient. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". applyRMSProp = applyRMSProp' id applyRMSProp' :: forall v'4 v'5 v'6 v'7 v'8 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __ms__: Should be from a Variable(). -> Tensor Ref t -- ^ __mom__: Should be from a Variable(). -> Tensor v'4 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'5 t -- ^ __rho__: Decay rate. Must be a scalar. -> Tensor v'6 t -- ^ __momentum__ -> Tensor v'7 t -- ^ __epsilon__: Ridge term. Must be a scalar. -> Tensor v'8 t -- ^ __grad__: The gradient. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". applyRMSProp' op'options var ms mom lr rho momentum epsilon grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs ms, buildInputs mom, buildInputs lr, buildInputs rho, buildInputs momentum, buildInputs epsilon, buildInputs grad] buildOp [] (opDef "ApplyRMSProp" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "ms" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "mom" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "rho" description: "Decay rate. Must be a scalar." type_attr: "T" } input_arg { name: "momentum" type_attr: "T" } input_arg { name: "epsilon" description: "Ridge term. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } output_arg { name: "out" description: "Same as \"var\"."
type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } -} -- | Returns the truth value of abs(x-y) < tolerance element-wise. approximateEqual :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ approximateEqual = approximateEqual' id approximateEqual' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ approximateEqual' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "ApproximateEqual" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "tolerance" type: "float" default_value { f: 1.0e-5 } } -} -- | Returns the index with the largest value across dimensions of a tensor. argMax :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 tidx -- ^ __dimension__: int32, 0 <= dimension < rank(input). Describes which dimension -- of the input Tensor to reduce across. For vectors, use dimension = 0. -> Tensor Build Data.Int.Int64 -- ^ __output__ argMax = argMax' id argMax' :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tidx -- ^ __dimension__: int32, 0 <= dimension < rank(input). Describes which dimension -- of the input Tensor to reduce across. For vectors, use dimension = 0. 
-> Tensor Build Data.Int.Int64 -- ^ __output__ argMax' op'options input dimension | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs dimension] return (opDef "ArgMax" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "dimension" description: "int32, 0 <= dimension < rank(input). Describes which dimension\nof the input Tensor to reduce across. For vectors, use dimension = 0." type_attr: "Tidx" } output_arg { name: "output" type: DT_INT64 } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Returns the index with the smallest value across dimensions of a tensor. argMin :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 tidx -- ^ __dimension__: int32, 0 <= dimension < rank(input). Describes which dimension -- of the input Tensor to reduce across. For vectors, use dimension = 0. -> Tensor Build Data.Int.Int64 -- ^ __output__ argMin = argMin' id argMin' :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tidx -- ^ __dimension__: int32, 0 <= dimension < rank(input). Describes which dimension -- of the input Tensor to reduce across. For vectors, use dimension = 0. -> Tensor Build Data.Int.Int64 -- ^ __output__ argMin' op'options input dimension | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs dimension] return (opDef "ArgMin" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "dimension" description: "int32, 0 <= dimension < rank(input). Describes which dimension\nof the input Tensor to reduce across. For vectors, use dimension = 0." type_attr: "Tidx" } output_arg { name: "output" type: DT_INT64 } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Converts each entry in the given tensor to strings. Supports many numeric -- -- types and boolean. asString :: forall v'1 t . 
(OneOf '[(Data.Complex.Complex Float), Bool, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ asString = asString' id asString' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Float), Bool, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ asString' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "AsString" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type: DT_STRING } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_FLOAT type: DT_DOUBLE type: DT_BOOL type: DT_INT8 } } } attr { name: "precision" type: "int" default_value { i: -1 } description: "The post-decimal precision to use for floating point numbers.\nOnly used if precision > -1." } attr { name: "scientific" type: "bool" default_value { b: false } description: "Use scientific notation for floating point numbers." } attr { name: "shortest" type: "bool" default_value { b: false } description: "Use shortest representation (either scientific or standard) for\nfloating point numbers." } attr { name: "width" type: "int" default_value { i: -1 } description: "Pad pre-decimal numbers to this width.\nApplies to both floating point and integer numbers.\nOnly used if width > -1." } attr { name: "fill" type: "string" default_value { s: "" } description: "The value to pad if width > -1. If empty, pads with spaces.\nAnother typical value is \'0\'. String cannot be longer than 1 character." } -} -- | Computes asin of x element-wise. asin :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ asin = asin' id asin' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ asin' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Asin" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Asserts that the given condition is true. -- -- If `condition` evaluates to false, print the list of tensors in `data`. -- `summarize` determines how many entries of the tensors to print. assert :: forall v'1 v'2 t m' . (MonadBuild m', TensorTypes t) => Tensor v'1 Bool -- ^ __condition__: The condition to evaluate. -> TensorList (v'2) t -- ^ __data__: The tensors to print out when condition is false. -> m' (ControlNode) assert = assert' id assert' :: forall v'1 v'2 t m' . (MonadBuild m', TensorTypes t) => OpParams -> Tensor v'1 Bool -- ^ __condition__: The condition to evaluate. -> TensorList (v'2) t -- ^ __data__: The tensors to print out when condition is false. 
-> m' (ControlNode) assert' op'options condition data' | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs condition, buildInputs data'] buildOp [] (opDef "Assert" & opAttr "T" .~ fromTensorTypes (Proxy :: Proxy t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "condition" description: "The condition to evaluate." type: DT_BOOL } input_arg { name: "data" description: "The tensors to print out when condition is false." type_list_attr: "T" } attr { name: "T" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "summarize" type: "int" default_value { i: 3 } description: "Print this many entries of each tensor." } -} -- | Update 'ref' by assigning 'value' to it. -- -- This operation outputs "ref" after the assignment is done. -- This makes it easier to chain operations that need to use the reset value. assign :: forall v'2 t m' . (MonadBuild m', TensorType t) => Tensor Ref t -- ^ __ref__: Should be from a `Variable` node. May be uninitialized. -> Tensor v'2 t -- ^ __value__: The value to be assigned to the variable. -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as "ref". Returned as a convenience for operations that want -- to use the new value after the variable has been reset. assign = assign' id assign' :: forall v'2 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -- ^ __ref__: Should be from a `Variable` node. May be uninitialized. -> Tensor v'2 t -- ^ __value__: The value to be assigned to the variable. -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as "ref". Returned as a convenience for operations that want -- to use the new value after the variable has been reset. assign' op'options ref value | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs value] buildOp [] (opDef "Assign" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" description: "Should be from a `Variable` node. May be uninitialized." type_attr: "T" is_ref: true } input_arg { name: "value" description: "The value to be assigned to the variable." type_attr: "T" } output_arg { name: "output_ref" description: "= Same as \"ref\". Returned as a convenience for operations that want\nto use the new value after the variable has been reset." type_attr: "T" is_ref: true } attr { name: "T" type: "type" } attr { name: "validate_shape" type: "bool" default_value { b: true } description: "If true, the operation will validate that the shape\nof \'value\' matches the shape of the Tensor being assigned to. If false,\n\'ref\' will take on the shape of \'value\'." } attr { name: "use_locking" type: "bool" default_value { b: true } description: "If True, the assignment will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention." } -} -- | Update 'ref' by adding 'value' to it. -- -- This operation outputs "ref" after the update is done. -- This makes it easier to chain operations that need to use the reset value. assignAdd :: forall v'2 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor Ref t -- ^ __ref__: Should be from a `Variable` node. -> Tensor v'2 t -- ^ __value__: The value to be added to the variable. -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as "ref". 
Returned as a convenience for operations that want -- to use the new value after the variable has been updated. assignAdd = assignAdd' id assignAdd' :: forall v'2 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor Ref t -- ^ __ref__: Should be from a `Variable` node. -> Tensor v'2 t -- ^ __value__: The value to be added to the variable. -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as "ref". Returned as a convenience for operations that want -- to use the new value after the variable has been updated. assignAdd' op'options ref value | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs value] buildOp [] (opDef "AssignAdd" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" description: "Should be from a `Variable` node." type_attr: "T" is_ref: true } input_arg { name: "value" description: "The value to be added to the variable." type_attr: "T" } output_arg { name: "output_ref" description: "= Same as \"ref\". Returned as a convenience for operations that want\nto use the new value after the variable has been updated." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, the addition will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention." } -} -- | Adds a value to the current value of a variable. -- -- Any ReadVariableOp which depends directly or indirectly on this assign is -- guaranteed to see the incremented value or a subsequent newer one. -- -- Outputs the incremented value, which can be used to totally order the -- increments to this variable. assignAddVariableOp :: forall v'1 v'2 dtype m' . (MonadBuild m', TensorType dtype) => Tensor v'1 ResourceHandle -- ^ __resource__: handle to the resource in which to store the variable. -> Tensor v'2 dtype -- ^ __value__: the value by which the variable will be incremented. -> m' (ControlNode) assignAddVariableOp = assignAddVariableOp' id assignAddVariableOp' :: forall v'1 v'2 dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__: handle to the resource in which to store the variable. -> Tensor v'2 dtype -- ^ __value__: the value by which the variable will be incremented. -> m' (ControlNode) assignAddVariableOp' op'options resource value | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs value] buildOp [] (opDef "AssignAddVariableOp" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" description: "handle to the resource in which to store the variable." type: DT_RESOURCE } input_arg { name: "value" description: "the value by which the variable will be incremented." type_attr: "dtype" } attr { name: "dtype" type: "type" description: "the dtype of the value." } -} -- | Update 'ref' by subtracting 'value' from it. -- -- This operation outputs "ref" after the update is done. 
-- This makes it easier to chain operations that need to use the reset value. assignSub :: forall v'2 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor Ref t -- ^ __ref__: Should be from a `Variable` node. -> Tensor v'2 t -- ^ __value__: The value to be subtracted from the variable. -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as "ref". Returned as a convenience for operations that want -- to use the new value after the variable has been updated. assignSub = assignSub' id assignSub' :: forall v'2 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor Ref t -- ^ __ref__: Should be from a `Variable` node. -> Tensor v'2 t -- ^ __value__: The value to be subtracted from the variable. -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as "ref". Returned as a convenience for operations that want -- to use the new value after the variable has been updated. assignSub' op'options ref value | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs value] buildOp [] (opDef "AssignSub" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" description: "Should be from a `Variable` node." type_attr: "T" is_ref: true } input_arg { name: "value" description: "The value to be subtracted from the variable." type_attr: "T" } output_arg { name: "output_ref" description: "= Same as \"ref\". Returned as a convenience for operations that want\nto use the new value after the variable has been updated." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention." } -} -- | Subtracts a value from the current value of a variable. -- -- Any ReadVariableOp which depends directly or indirectly on this assign is -- guaranteed to see the decremented value or a subsequent newer one. -- -- Outputs the decremented value, which can be used to totally order the -- decrements to this variable. assignSubVariableOp :: forall v'1 v'2 dtype m' . (MonadBuild m', TensorType dtype) => Tensor v'1 ResourceHandle -- ^ __resource__: handle to the resource in which to store the variable. -> Tensor v'2 dtype -- ^ __value__: the value by which the variable will be decremented. -> m' (ControlNode) assignSubVariableOp = assignSubVariableOp' id assignSubVariableOp' :: forall v'1 v'2 dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__: handle to the resource in which to store the variable. -> Tensor v'2 dtype -- ^ __value__: the value by which the variable will be decremented.
-> m' (ControlNode) assignSubVariableOp' op'options resource value | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs value] buildOp [] (opDef "AssignSubVariableOp" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" description: "handle to the resource in which to store the variable." type: DT_RESOURCE } input_arg { name: "value" description: "the value by which the variable will be decremented." type_attr: "dtype" } attr { name: "dtype" type: "type" description: "the dtype of the value." } -} -- | Assigns a new value to a variable. -- -- Any ReadVariableOp with a control dependency on this op is guaranteed to return -- this value or a subsequent newer value of the variable. assignVariableOp :: forall v'1 v'2 dtype m' . (MonadBuild m', TensorType dtype) => Tensor v'1 ResourceHandle -- ^ __resource__: handle to the resource in which to store the variable. -> Tensor v'2 dtype -- ^ __value__: the value to set the new tensor to use. -> m' (ControlNode) assignVariableOp = assignVariableOp' id assignVariableOp' :: forall v'1 v'2 dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__: handle to the resource in which to store the variable. -> Tensor v'2 dtype -- ^ __value__: the value to set the new tensor to use. -> m' (ControlNode) assignVariableOp' op'options resource value | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs value] buildOp [] (opDef "AssignVariableOp" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" description: "handle to the resource in which to store the variable." type: DT_RESOURCE } input_arg { name: "value" description: "the value to set the new tensor to use." type_attr: "dtype" } attr { name: "dtype" type: "type" description: "the dtype of the value." } -} -- | Computes atan of x element-wise. atan :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ atan = atan' id atan' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ atan' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Atan" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Outputs a `Summary` protocol buffer with audio. -- -- The summary has up to `max_outputs` summary values containing audio. The -- audio is built from `tensor` which must be 3-D with shape `[batch_size, -- frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are -- assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`. -- -- The `tag` argument is a scalar `Tensor` of type `string`.
It is used to -- build the `tag` of the summary values: -- -- * If `max_outputs` is 1, the summary value tag is '*tag*/audio'. -- * If `max_outputs` is greater than 1, the summary value tags are -- generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc. audioSummary :: Float -- ^ __sample_rate__: The sample rate of the signal in hertz. -> Tensor v'1 Data.ByteString.ByteString -- ^ __tag__: Scalar. Used to build the `tag` attribute of the summary values. -> Tensor v'2 Float -- ^ __tensor__: 2-D of shape `[batch_size, frames]`. -> Tensor Build Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer. audioSummary = audioSummary' id audioSummary' :: OpParams -> Float -- ^ __sample_rate__: The sample rate of the signal in hertz. -> Tensor v'1 Data.ByteString.ByteString -- ^ __tag__: Scalar. Used to build the `tag` attribute of the summary values. -> Tensor v'2 Float -- ^ __tensor__: 2-D of shape `[batch_size, frames]`. -> Tensor Build Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer. audioSummary' op'options sample_rate tag tensor | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tag, buildInputs tensor] return (opDef "AudioSummary" & opAttr "sample_rate" .~ sample_rate & op'options & opInputs .~ op'inputs) {- input_arg { name: "tag" description: "Scalar. Used to build the `tag` attribute of the summary values." type: DT_STRING } input_arg { name: "tensor" description: "2-D of shape `[batch_size, frames]`." type: DT_FLOAT } output_arg { name: "summary" description: "Scalar. Serialized `Summary` protocol buffer." type: DT_STRING } attr { name: "sample_rate" type: "float" description: "The sample rate of the signal in hertz." } attr { name: "max_outputs" type: "int" default_value { i: 3 } description: "Max number of batch elements to generate audio for." has_minimum: true minimum: 1 } -} -- | Outputs a `Summary` protocol buffer with audio. -- -- The summary has up to `max_outputs` summary values containing audio. The -- audio is built from `tensor` which must be 3-D with shape `[batch_size, -- frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are -- assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`. -- -- The `tag` argument is a scalar `Tensor` of type `string`. It is used to -- build the `tag` of the summary values: -- -- * If `max_outputs` is 1, the summary value tag is '*tag*/audio'. -- * If `max_outputs` is greater than 1, the summary value tags are -- generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc. audioSummaryV2 :: Tensor v'1 Data.ByteString.ByteString -- ^ __tag__: Scalar. Used to build the `tag` attribute of the summary values. -> Tensor v'2 Float -- ^ __tensor__: 2-D of shape `[batch_size, frames]`. -> Tensor v'3 Float -- ^ __sample_rate__: The sample rate of the signal in hertz. -> Tensor Build Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer. audioSummaryV2 = audioSummaryV2' id audioSummaryV2' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __tag__: Scalar. Used to build the `tag` attribute of the summary values. -> Tensor v'2 Float -- ^ __tensor__: 2-D of shape `[batch_size, frames]`. -> Tensor v'3 Float -- ^ __sample_rate__: The sample rate of the signal in hertz. -> Tensor Build Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer. 
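-- Editorial note (not generated): a sketch of emitting an audio summary with
-- 'audioSummaryV2'. The 44100 Hz rate is illustrative, and @scalar@ is
-- assumed to behave like the TensorFlow.Ops helper of the same name (not
-- imported by this module).
--
-- @
-- waveSummary :: Tensor v1 Data.ByteString.ByteString  -- scalar tag
--             -> Tensor v2 Float                       -- [batch_size, frames]
--             -> Tensor Build Data.ByteString.ByteString
-- waveSummary tag wave = audioSummaryV2 tag wave (scalar 44100)
-- @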
audioSummaryV2' op'options tag tensor sample_rate | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tag, buildInputs tensor, buildInputs sample_rate] return (opDef "AudioSummaryV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "tag" description: "Scalar. Used to build the `tag` attribute of the summary values." type: DT_STRING } input_arg { name: "tensor" description: "2-D of shape `[batch_size, frames]`." type: DT_FLOAT } input_arg { name: "sample_rate" description: "The sample rate of the signal in hertz." type: DT_FLOAT } output_arg { name: "summary" description: "Scalar. Serialized `Summary` protocol buffer." type: DT_STRING } attr { name: "max_outputs" type: "int" default_value { i: 3 } description: "Max number of batch elements to generate audio for." has_minimum: true minimum: 1 } -} -- | Performs average pooling on the input. -- -- Each entry in `output` is the mean of the corresponding size `ksize` -- window in `value`. avgPool :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __value__: 4-D with shape `[batch, height, width, channels]`. -> Tensor Build t -- ^ __output__: The average pooled output tensor. avgPool = avgPool' id avgPool' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __value__: 4-D with shape `[batch, height, width, channels]`. -> Tensor Build t -- ^ __output__: The average pooled output tensor. avgPool' op'options value | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value] return (opDef "AvgPool" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "value" description: "4-D with shape `[batch, height, width, channels]`." type_attr: "T" } output_arg { name: "output" description: "The average pooled output tensor." type_attr: "T" } attr { name: "ksize" type: "list(int)" description: "The size of the sliding window for each dimension of `value`." has_minimum: true minimum: 4 } attr { name: "strides" type: "list(int)" description: "The stride of the sliding window for each dimension of `value`." has_minimum: true minimum: 4 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]." allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE } } } -} -- | Performs 3D average pooling on the input. avgPool3D :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __input__: Shape `[batch, depth, rows, cols, channels]` tensor to pool over. -> Tensor Build t -- ^ __output__: The average pooled output tensor. avgPool3D = avgPool3D' id avgPool3D' :: forall v'1 t . 
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: Shape `[batch, depth, rows, cols, channels]` tensor to pool over. -> Tensor Build t -- ^ __output__: The average pooled output tensor. avgPool3D' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "AvgPool3D" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "Shape `[batch, depth, rows, cols, channels]` tensor to pool over." type_attr: "T" } output_arg { name: "output" description: "The average pooled output tensor." type_attr: "T" } attr { name: "ksize" type: "list(int)" description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`." has_minimum: true minimum: 5 } attr { name: "strides" type: "list(int)" description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`." has_minimum: true minimum: 5 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } -} -- | Computes gradients of average pooling function. avgPool3DGrad :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 Data.Int.Int32 -- ^ __orig_input_shape__: The original input dimensions. -> Tensor v'2 t -- ^ __grad__: Output backprop of shape `[batch, depth, rows, cols, channels]`. -> Tensor Build t -- ^ __output__: The backprop for input. avgPool3DGrad = avgPool3DGrad' id avgPool3DGrad' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __orig_input_shape__: The original input dimensions. -> Tensor v'2 t -- ^ __grad__: Output backprop of shape `[batch, depth, rows, cols, channels]`. -> Tensor Build t -- ^ __output__: The backprop for input. avgPool3DGrad' op'options orig_input_shape grad | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs orig_input_shape, buildInputs grad] return (opDef "AvgPool3DGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "orig_input_shape" description: "The original input dimensions." type: DT_INT32 } input_arg { name: "grad" description: "Output backprop of shape `[batch, depth, rows, cols, channels]`." type_attr: "T" } output_arg { name: "output" description: "The backprop for input." type_attr: "T" } attr { name: "ksize" type: "list(int)" description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`." 
has_minimum: true minimum: 5 } attr { name: "strides" type: "list(int)" description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`." has_minimum: true minimum: 5 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } -} -- | Computes gradients of the average pooling function. avgPoolGrad :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 Data.Int.Int32 -- ^ __orig_input_shape__: 1-D. Shape of the original input to `avg_pool`. -> Tensor v'2 t -- ^ __grad__: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. -- the output of `avg_pool`. -> Tensor Build t -- ^ __output__: 4-D. Gradients w.r.t. the input of `avg_pool`. avgPoolGrad = avgPoolGrad' id avgPoolGrad' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __orig_input_shape__: 1-D. Shape of the original input to `avg_pool`. -> Tensor v'2 t -- ^ __grad__: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. -- the output of `avg_pool`. -> Tensor Build t -- ^ __output__: 4-D. Gradients w.r.t. the input of `avg_pool`. avgPoolGrad' op'options orig_input_shape grad | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs orig_input_shape, buildInputs grad] return (opDef "AvgPoolGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "orig_input_shape" description: "1-D. Shape of the original input to `avg_pool`." type: DT_INT32 } input_arg { name: "grad" description: "4-D with shape `[batch, height, width, channels]`. Gradients w.r.t.\nthe output of `avg_pool`." type_attr: "T" } output_arg { name: "output" description: "4-D. Gradients w.r.t. the input of `avg_pool`." type_attr: "T" } attr { name: "ksize" type: "list(int)" description: "The size of the sliding window for each dimension of the input." has_minimum: true minimum: 4 } attr { name: "strides" type: "list(int)" description: "The stride of the sliding window for each dimension of the input." has_minimum: true minimum: 4 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]." allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE } } } -} -- | Defines a barrier that persists across different graph executions. -- -- A barrier represents a key-value map, where each key is a string, and -- each value is a tuple of tensors. -- -- At runtime, the barrier contains 'complete' and 'incomplete' -- elements. 
A complete element has defined tensors for all components of -- its value tuple, and may be accessed using BarrierTakeMany. An -- incomplete element has some undefined components in its value tuple, -- and may be updated using BarrierInsertMany. barrier :: forall m' . (MonadBuild m') => [DataType] -- ^ __component_types__: The type of each component in a value. -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the barrier. barrier = barrier' id barrier' :: forall m' . (MonadBuild m') => OpParams -> [DataType] -- ^ __component_types__: The type of each component in a value. -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the barrier. barrier' op'options component_types | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "Barrier" & opAttr "component_types" .~ component_types & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" description: "The handle to the barrier." type: DT_STRING is_ref: true } attr { name: "component_types" type: "list(type)" description: "The type of each component in a value." has_minimum: true minimum: 1 } attr { name: "shapes" type: "list(shape)" default_value { list { } } description: "The shape of each component in a value. Each shape must be 1 in the\nfirst dimension. The length of this attr must be the same as the length of\ncomponent_types." has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } description: "The capacity of the barrier. The default capacity is MAX_INT32,\nwhich is the largest capacity of the underlying queue." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this barrier is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this barrier will be shared under the given name\nacross multiple sessions." } -} -- | Closes the given barrier. -- -- This operation signals that no more new elements will be inserted in the -- given barrier. Subsequent InsertMany that try to introduce a new key will fail. -- Subsequent InsertMany operations that just add missing components to already -- existing elements will continue to succeed. Subsequent TakeMany operations will -- continue to succeed if sufficient completed elements remain in the barrier. -- Subsequent TakeMany operations that would block will fail immediately. barrierClose :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier. -> m' (ControlNode) barrierClose = barrierClose' id barrierClose' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier. -> m' (ControlNode) barrierClose' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "BarrierClose" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a barrier." type: DT_STRING is_ref: true } attr { name: "cancel_pending_enqueues" type: "bool" default_value { b: false } description: "If true, all pending enqueue requests that are\nblocked on the barrier\'s queue will be cancelled. InsertMany will fail, even\nif no new key is introduced." } -} -- | Computes the number of incomplete elements in the given barrier. barrierIncompleteSize :: forall m' . 
(MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier. -> m' (Tensor Value Data.Int.Int32) -- ^ __size__: The number of incomplete elements (i.e. those with some of their value -- components not set) in the barrier. barrierIncompleteSize = barrierIncompleteSize' id barrierIncompleteSize' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier. -> m' (Tensor Value Data.Int.Int32) -- ^ __size__: The number of incomplete elements (i.e. those with some of their value -- components not set) in the barrier. barrierIncompleteSize' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "BarrierIncompleteSize" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a barrier." type: DT_STRING is_ref: true } output_arg { name: "size" description: "The number of incomplete elements (i.e. those with some of their value\ncomponents not set) in the barrier." type: DT_INT32 } -} -- | For each key, assigns the respective value to the specified component. -- -- If a key is not found in the barrier, this operation will create a new -- incomplete element. If a key is found in the barrier, and the element -- already has a value at component_index, this operation will fail with -- INVALID_ARGUMENT, and leave the barrier in an undefined state. barrierInsertMany :: forall v'2 v'3 t m' . (MonadBuild m', TensorType t) => Data.Int.Int64 -- ^ __component_index__: The component of the barrier elements that is being assigned. -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier. -> Tensor v'2 Data.ByteString.ByteString -- ^ __keys__: A one-dimensional tensor of keys, with length n. -> Tensor v'3 t -- ^ __values__: An any-dimensional tensor of values, which are associated with the -- respective keys. The 0th dimension must have length n. -> m' (ControlNode) barrierInsertMany = barrierInsertMany' id barrierInsertMany' :: forall v'2 v'3 t m' . (MonadBuild m', TensorType t) => OpParams -> Data.Int.Int64 -- ^ __component_index__: The component of the barrier elements that is being assigned. -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier. -> Tensor v'2 Data.ByteString.ByteString -- ^ __keys__: A one-dimensional tensor of keys, with length n. -> Tensor v'3 t -- ^ __values__: An any-dimensional tensor of values, which are associated with the -- respective keys. The 0th dimension must have length n. -> m' (ControlNode) barrierInsertMany' op'options component_index handle keys values | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs keys, buildInputs values] buildOp [] (opDef "BarrierInsertMany" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "component_index" .~ component_index & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a barrier." type: DT_STRING is_ref: true } input_arg { name: "keys" description: "A one-dimensional tensor of keys, with length n." type: DT_STRING } input_arg { name: "values" description: "An any-dimensional tensor of values, which are associated with the\nrespective keys. The 0th dimension must have length n." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "component_index" type: "int" description: "The component of the barrier elements that is being assigned." 
} -} -- | Computes the number of complete elements in the given barrier. barrierReadySize :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier. -> m' (Tensor Value Data.Int.Int32) -- ^ __size__: The number of complete elements (i.e. those with all of their value -- components set) in the barrier. barrierReadySize = barrierReadySize' id barrierReadySize' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier. -> m' (Tensor Value Data.Int.Int32) -- ^ __size__: The number of complete elements (i.e. those with all of their value -- components set) in the barrier. barrierReadySize' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "BarrierReadySize" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a barrier." type: DT_STRING is_ref: true } output_arg { name: "size" description: "The number of complete elements (i.e. those with all of their value\ncomponents set) in the barrier." type: DT_INT32 } -} -- | Takes the given number of completed elements from a barrier. -- -- This operation concatenates completed-element component tensors along -- the 0th dimension to make a single component tensor. -- -- Elements come out of the barrier when they are complete, and in the order -- in which they were placed into the barrier. The indices output provides -- information about the batch in which each element was originally inserted -- into the barrier. barrierTakeMany :: forall v'2 component_types m' . (MonadBuild m', TensorTypes component_types) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier. -> Tensor v'2 Data.Int.Int32 -- ^ __num_elements__: A single-element tensor containing the number of elements to -- take. -> m' ((Tensor Value Data.Int.Int64, Tensor Value Data.ByteString.ByteString, TensorList (Value) component_types)) -- ^ (__indices__, __keys__, __values__) -- -- * __indices__: A one-dimensional tensor of indices, with length num_elems. -- These indices refer to the batch in which the values were placed into the -- barrier (starting with MIN_LONG and increasing with each BarrierInsertMany). -- -- * __keys__: A one-dimensional tensor of keys, with length num_elements. -- -- * __values__: One any-dimensional tensor per component in a barrier element. All -- values have length num_elements in the 0th dimension. barrierTakeMany = barrierTakeMany' id barrierTakeMany' :: forall v'2 component_types m' . (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a barrier. -> Tensor v'2 Data.Int.Int32 -- ^ __num_elements__: A single-element tensor containing the number of elements to -- take. -> m' ((Tensor Value Data.Int.Int64, Tensor Value Data.ByteString.ByteString, TensorList (Value) component_types)) -- ^ (__indices__, __keys__, __values__) -- -- * __indices__: A one-dimensional tensor of indices, with length num_elems. -- These indices refer to the batch in which the values were placed into the -- barrier (starting with MIN_LONG and increasing with each BarrierInsertMany). -- -- * __keys__: A one-dimensional tensor of keys, with length num_elements. -- -- * __values__: One any-dimensional tensor per component in a barrier element. All -- values have length num_elements in the 0th dimension. 
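-- A hedged producer/consumer sketch tying the barrier ops together (`h`,
-- `keysT`, `valsT`, and `n` are placeholders for tensors built elsewhere):
-- BarrierInsertMany assigns one component of each keyed element, and
-- BarrierTakeMany removes elements once all of their components are set.
--
-- > _ <- barrierInsertMany 0 h keysT valsT          -- assign component 0 under keysT
-- > (indices, keys, values) <- barrierTakeMany h n  -- take n completed elements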
barrierTakeMany' op'options handle num_elements | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs num_elements] buildOp [] (opDef "BarrierTakeMany" & opAttr "component_types" .~ fromTensorTypes (Proxy :: Proxy component_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a barrier." type: DT_STRING is_ref: true } input_arg { name: "num_elements" description: "A single-element tensor containing the number of elements to\ntake." type: DT_INT32 } output_arg { name: "indices" description: "A one-dimensional tensor of indices, with length num_elems.\nThese indices refer to the batch in which the values were placed into the\nbarrier (starting with MIN_LONG and increasing with each BarrierInsertMany)." type: DT_INT64 } output_arg { name: "keys" description: "A one-dimensional tensor of keys, with length num_elements." type: DT_STRING } output_arg { name: "values" description: "One any-dimensional tensor per component in a barrier element. All\nvalues have length num_elements in the 0th dimension." type_list_attr: "component_types" } attr { name: "component_types" type: "list(type)" description: "The type of each component in a value." has_minimum: true minimum: 1 } attr { name: "allow_small_batch" type: "bool" default_value { b: false } description: "Allow to return less than num_elements items if barrier is\nalready closed." } attr { name: "wait_for_incomplete" type: "bool" default_value { b: false } } attr { name: "timeout_ms" type: "int" default_value { i: -1 } description: "If the queue is empty, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet." } -} -- | batchCholesky :: forall v'1 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ batchCholesky = batchCholesky' id batchCholesky' :: forall v'1 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ batchCholesky' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchCholesky" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } } } -} -- | batchCholeskyGrad :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __l__ -> Tensor v'2 t -- ^ __grad__ -> Tensor Build t -- ^ __output__ batchCholeskyGrad = batchCholeskyGrad' id batchCholeskyGrad' :: forall v'1 v'2 t . 
(OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __l__ -> Tensor v'2 t -- ^ __grad__ -> Tensor Build t -- ^ __output__ batchCholeskyGrad' op'options l grad | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs l, buildInputs grad] return (opDef "BatchCholeskyGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "l" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | batchFFT :: Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ batchFFT = batchFFT' id batchFFT' :: OpParams -> Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ batchFFT' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchFFT" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_COMPLEX64 } output_arg { name: "output" type: DT_COMPLEX64 } -} -- | batchFFT2D :: Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ batchFFT2D = batchFFT2D' id batchFFT2D' :: OpParams -> Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ batchFFT2D' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchFFT2D" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_COMPLEX64 } output_arg { name: "output" type: DT_COMPLEX64 } -} -- | batchFFT3D :: Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ batchFFT3D = batchFFT3D' id batchFFT3D' :: OpParams -> Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ batchFFT3D' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchFFT3D" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_COMPLEX64 } output_arg { name: "output" type: DT_COMPLEX64 } -} -- | batchIFFT :: Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ batchIFFT = batchIFFT' id batchIFFT' :: OpParams -> Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ batchIFFT' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchIFFT" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_COMPLEX64 } output_arg { name: "output" type: DT_COMPLEX64 } -} -- | batchIFFT2D :: Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ batchIFFT2D = batchIFFT2D' id batchIFFT2D' :: OpParams -> Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ batchIFFT2D' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchIFFT2D" & op'options & opInputs .~ 
op'inputs) {- input_arg { name: "input" type: DT_COMPLEX64 } output_arg { name: "output" type: DT_COMPLEX64 } -} -- | batchIFFT3D :: Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ batchIFFT3D = batchIFFT3D' id batchIFFT3D' :: OpParams -> Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ batchIFFT3D' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchIFFT3D" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_COMPLEX64 } output_arg { name: "output" type: DT_COMPLEX64 } -} -- | Multiplies slices of two tensors in batches. -- -- Multiplies all slices of `Tensor` `x` and `y` (each slice can be -- viewed as an element of a batch), and arranges the individual results -- in a single output tensor of the same batch size. Each of the -- individual slices can optionally be adjointed (to adjoint a matrix -- means to transpose and conjugate it) before multiplication by setting -- the `adj_x` or `adj_y` flag to `True`; both flags default to `False`. -- -- The input tensors `x` and `y` are 3-D or higher with shape `[..., r_x, c_x]` -- and `[..., r_y, c_y]`. -- -- The output tensor is 3-D or higher with shape `[..., r_o, c_o]`, where: -- -- r_o = c_x if adj_x else r_x -- c_o = r_y if adj_y else c_y -- -- It is computed as: -- -- output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) batchMatMul :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__: 3-D or higher with shape `[..., r_x, c_x]`. -> Tensor v'2 t -- ^ __y__: 3-D or higher with shape `[..., r_y, c_y]`. -> Tensor Build t -- ^ __output__: 3-D or higher with shape `[..., r_o, c_o]` batchMatMul = batchMatMul' id batchMatMul' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__: 3-D or higher with shape `[..., r_x, c_x]`. -> Tensor v'2 t -- ^ __y__: 3-D or higher with shape `[..., r_y, c_y]`. -> Tensor Build t -- ^ __output__: 3-D or higher with shape `[..., r_o, c_o]` batchMatMul' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "BatchMatMul" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" description: "3-D or higher with shape `[..., r_x, c_x]`." type_attr: "T" } input_arg { name: "y" description: "3-D or higher with shape `[..., r_y, c_y]`." type_attr: "T" } output_arg { name: "output" description: "3-D or higher with shape `[..., r_o, c_o]`" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } attr { name: "adj_x" type: "bool" default_value { b: false } description: "If `True`, adjoint the slices of `x`. Defaults to `False`." } attr { name: "adj_y" type: "bool" default_value { b: false } description: "If `True`, adjoint the slices of `y`. Defaults to `False`." } -} -- | batchMatrixBandPart :: forall v'1 v'2 v'3 t .
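-- A hedged call sketch for batchMatMul (`x` and `y` stand for suitably
-- shaped tensors): the adj_x/adj_y attributes are set through OpParams, the
-- same mechanism the generated bindings themselves use for attrs.
--
-- > z  = batchMatMul x y                            -- slice-wise x * y
-- > z' = batchMatMul' (opAttr "adj_x" .~ True) x y  -- slice-wise adjoint(x) * y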
(TensorType t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int64 -- ^ __num_lower__ -> Tensor v'3 Data.Int.Int64 -- ^ __num_upper__ -> Tensor Build t -- ^ __band__ batchMatrixBandPart = batchMatrixBandPart' id batchMatrixBandPart' :: forall v'1 v'2 v'3 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int64 -- ^ __num_lower__ -> Tensor v'3 Data.Int.Int64 -- ^ __num_upper__ -> Tensor Build t -- ^ __band__ batchMatrixBandPart' op'options input num_lower num_upper | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs num_lower, buildInputs num_upper] return (opDef "BatchMatrixBandPart" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "num_lower" type: DT_INT64 } input_arg { name: "num_upper" type: DT_INT64 } output_arg { name: "band" type_attr: "T" } attr { name: "T" type: "type" } -} -- | batchMatrixDeterminant :: forall v'1 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ batchMatrixDeterminant = batchMatrixDeterminant' id batchMatrixDeterminant' :: forall v'1 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ batchMatrixDeterminant' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchMatrixDeterminant" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | batchMatrixDiag :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __diagonal__ -> Tensor Build t -- ^ __output__ batchMatrixDiag = batchMatrixDiag' id batchMatrixDiag' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __diagonal__ -> Tensor Build t -- ^ __output__ batchMatrixDiag' op'options diagonal | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs diagonal] return (opDef "BatchMatrixDiag" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "diagonal" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } -} -- | batchMatrixDiagPart :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __diagonal__ batchMatrixDiagPart = batchMatrixDiagPart' id batchMatrixDiagPart' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __diagonal__ batchMatrixDiagPart' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchMatrixDiagPart" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "diagonal" type_attr: "T" } attr { name: "T" type: "type" } -} -- | batchMatrixInverse :: forall v'1 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ batchMatrixInverse = batchMatrixInverse' id batchMatrixInverse' :: forall v'1 t . 
(OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ batchMatrixInverse' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchMatrixInverse" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "adjoint" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } } } -} -- | batchMatrixSetDiag :: forall v'1 v'2 t . (TensorType t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __diagonal__ -> Tensor Build t -- ^ __output__ batchMatrixSetDiag = batchMatrixSetDiag' id batchMatrixSetDiag' :: forall v'1 v'2 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __diagonal__ -> Tensor Build t -- ^ __output__ batchMatrixSetDiag' op'options input diagonal | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs diagonal] return (opDef "BatchMatrixSetDiag" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "diagonal" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } -} -- | batchMatrixSolve :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __matrix__ -> Tensor v'2 t -- ^ __rhs__ -> Tensor Build t -- ^ __output__ batchMatrixSolve = batchMatrixSolve' id batchMatrixSolve' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __matrix__ -> Tensor v'2 t -- ^ __rhs__ -> Tensor Build t -- ^ __output__ batchMatrixSolve' op'options matrix rhs | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs matrix, buildInputs rhs] return (opDef "BatchMatrixSolve" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "matrix" type_attr: "T" } input_arg { name: "rhs" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "adjoint" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } } } -} -- | batchMatrixSolveLs :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __matrix__ -> Tensor v'2 t -- ^ __rhs__ -> Tensor v'3 Double -- ^ __l2_regularizer__ -> Tensor Build t -- ^ __output__ batchMatrixSolveLs = batchMatrixSolveLs' id batchMatrixSolveLs' :: forall v'1 v'2 v'3 t . 
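-- A hedged sketch for the batched solvers above (`ms` is a placeholder batch
-- of square matrices, `bs` the matching right-hand sides): each slice solves
-- ms[..., :, :] * x[..., :, :] = bs[..., :, :].
--
-- > x  = batchMatrixSolve ms bs                              -- solve M x = b
-- > x' = batchMatrixSolve' (opAttr "adjoint" .~ True) ms bs  -- solve adjoint(M) x = b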
(OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __matrix__ -> Tensor v'2 t -- ^ __rhs__ -> Tensor v'3 Double -- ^ __l2_regularizer__ -> Tensor Build t -- ^ __output__ batchMatrixSolveLs' op'options matrix rhs l2_regularizer | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs matrix, buildInputs rhs, buildInputs l2_regularizer] return (opDef "BatchMatrixSolveLs" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "matrix" type_attr: "T" } input_arg { name: "rhs" type_attr: "T" } input_arg { name: "l2_regularizer" type: DT_DOUBLE } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } } } attr { name: "fast" type: "bool" default_value { b: true } } -} -- | batchMatrixTriangularSolve :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __matrix__ -> Tensor v'2 t -- ^ __rhs__ -> Tensor Build t -- ^ __output__ batchMatrixTriangularSolve = batchMatrixTriangularSolve' id batchMatrixTriangularSolve' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __matrix__ -> Tensor v'2 t -- ^ __rhs__ -> Tensor Build t -- ^ __output__ batchMatrixTriangularSolve' op'options matrix rhs | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs matrix, buildInputs rhs] return (opDef "BatchMatrixTriangularSolve" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "matrix" type_attr: "T" } input_arg { name: "rhs" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "lower" type: "bool" default_value { b: true } } attr { name: "adjoint" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } } } -} -- | Batch normalization. -- -- This op is deprecated. Prefer `tf.nn.batch_normalization`. batchNormWithGlobalNormalization :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Bool -- ^ __scale_after_normalization__: A bool indicating whether the resulted tensor -- needs to be multiplied with gamma. -> Float -- ^ __variance_epsilon__: A small float number to avoid dividing by 0. -> Tensor v'1 t -- ^ __t__: A 4D input Tensor. -> Tensor v'2 t -- ^ __m__: A 1D mean Tensor with size matching the last dimension of t. -- This is the first output from tf.nn.moments, -- or a saved moving average thereof. -> Tensor v'3 t -- ^ __v__: A 1D variance Tensor with size matching the last dimension of t. -- This is the second output from tf.nn.moments, -- or a saved moving average thereof. -> Tensor v'4 t -- ^ __beta__: A 1D beta Tensor with size matching the last dimension of t. -- An offset to be added to the normalized tensor. -> Tensor v'5 t -- ^ __gamma__: A 1D gamma Tensor with size matching the last dimension of t. -- If "scale_after_normalization" is true, this tensor will be multiplied -- with the normalized tensor. -> Tensor Build t -- ^ __result__ batchNormWithGlobalNormalization = batchNormWithGlobalNormalization' id batchNormWithGlobalNormalization' :: forall v'1 v'2 v'3 v'4 v'5 t . 
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Bool -- ^ __scale_after_normalization__: A bool indicating whether the resulted tensor -- needs to be multiplied with gamma. -> Float -- ^ __variance_epsilon__: A small float number to avoid dividing by 0. -> Tensor v'1 t -- ^ __t__: A 4D input Tensor. -> Tensor v'2 t -- ^ __m__: A 1D mean Tensor with size matching the last dimension of t. -- This is the first output from tf.nn.moments, -- or a saved moving average thereof. -> Tensor v'3 t -- ^ __v__: A 1D variance Tensor with size matching the last dimension of t. -- This is the second output from tf.nn.moments, -- or a saved moving average thereof. -> Tensor v'4 t -- ^ __beta__: A 1D beta Tensor with size matching the last dimension of t. -- An offset to be added to the normalized tensor. -> Tensor v'5 t -- ^ __gamma__: A 1D gamma Tensor with size matching the last dimension of t. -- If "scale_after_normalization" is true, this tensor will be multiplied -- with the normalized tensor. -> Tensor Build t -- ^ __result__ batchNormWithGlobalNormalization' op'options scale_after_normalization variance_epsilon t m v beta gamma | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs t, buildInputs m, buildInputs v, buildInputs beta, buildInputs gamma] return (opDef "BatchNormWithGlobalNormalization" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "scale_after_normalization" .~ scale_after_normalization & opAttr "variance_epsilon" .~ variance_epsilon & op'options & opInputs .~ op'inputs) {- input_arg { name: "t" description: "A 4D input Tensor." type_attr: "T" } input_arg { name: "m" description: "A 1D mean Tensor with size matching the last dimension of t.\nThis is the first output from tf.nn.moments,\nor a saved moving average thereof." type_attr: "T" } input_arg { name: "v" description: "A 1D variance Tensor with size matching the last dimension of t.\nThis is the second output from tf.nn.moments,\nor a saved moving average thereof." type_attr: "T" } input_arg { name: "beta" description: "A 1D beta Tensor with size matching the last dimension of t.\nAn offset to be added to the normalized tensor." type_attr: "T" } input_arg { name: "gamma" description: "A 1D gamma Tensor with size matching the last dimension of t.\nIf \"scale_after_normalization\" is true, this tensor will be multiplied\nwith the normalized tensor." type_attr: "T" } output_arg { name: "result" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "variance_epsilon" type: "float" description: "A small float number to avoid dividing by 0." } attr { name: "scale_after_normalization" type: "bool" description: "A bool indicating whether the resulted tensor\nneeds to be multiplied with gamma." } -} -- | Gradients for batch normalization. -- -- This op is deprecated. See `tf.nn.batch_normalization`. batchNormWithGlobalNormalizationGrad :: forall v'1 v'2 v'3 v'4 v'5 t . 
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Bool -- ^ __scale_after_normalization__: A bool indicating whether the resulted tensor -- needs to be multiplied with gamma. -> Float -- ^ __variance_epsilon__: A small float number to avoid dividing by 0. -> Tensor v'1 t -- ^ __t__: A 4D input Tensor. -> Tensor v'2 t -- ^ __m__: A 1D mean Tensor with size matching the last dimension of t. -- This is the first output from tf.nn.moments, -- or a saved moving average thereof. -> Tensor v'3 t -- ^ __v__: A 1D variance Tensor with size matching the last dimension of t. -- This is the second output from tf.nn.moments, -- or a saved moving average thereof. -> Tensor v'4 t -- ^ __gamma__: A 1D gamma Tensor with size matching the last dimension of t. -- If "scale_after_normalization" is true, this Tensor will be multiplied -- with the normalized Tensor. -> Tensor v'5 t -- ^ __backprop__: 4D backprop Tensor. -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) -- ^ (__dx__, __dm__, __dv__, __db__, __dg__) -- -- * __dx__: 4D backprop tensor for input. -- -- * __dm__: 1D backprop tensor for mean. -- -- * __dv__: 1D backprop tensor for variance. -- -- * __db__: 1D backprop tensor for beta. -- -- * __dg__: 1D backprop tensor for gamma. batchNormWithGlobalNormalizationGrad = batchNormWithGlobalNormalizationGrad' id batchNormWithGlobalNormalizationGrad' :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Bool -- ^ __scale_after_normalization__: A bool indicating whether the resulted tensor -- needs to be multiplied with gamma. -> Float -- ^ __variance_epsilon__: A small float number to avoid dividing by 0. -> Tensor v'1 t -- ^ __t__: A 4D input Tensor. -> Tensor v'2 t -- ^ __m__: A 1D mean Tensor with size matching the last dimension of t. -- This is the first output from tf.nn.moments, -- or a saved moving average thereof. -> Tensor v'3 t -- ^ __v__: A 1D variance Tensor with size matching the last dimension of t. -- This is the second output from tf.nn.moments, -- or a saved moving average thereof. -> Tensor v'4 t -- ^ __gamma__: A 1D gamma Tensor with size matching the last dimension of t. -- If "scale_after_normalization" is true, this Tensor will be multiplied -- with the normalized Tensor. -> Tensor v'5 t -- ^ __backprop__: 4D backprop Tensor. -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) -- ^ (__dx__, __dm__, __dv__, __db__, __dg__) -- -- * __dx__: 4D backprop tensor for input. -- -- * __dm__: 1D backprop tensor for mean. -- -- * __dv__: 1D backprop tensor for variance. -- -- * __db__: 1D backprop tensor for beta. -- -- * __dg__: 1D backprop tensor for gamma. 
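-- As an aid to reading these gradients (stated from the standard definition
-- of this deprecated op, not taken from the generated docs): the forward pass
-- normalizes along the last dimension,
--
-- >   result = (t - m) / sqrt(v + variance_epsilon) * gamma + beta
--
-- with the gamma factor applied only when scale_after_normalization is true;
-- dx, dm, dv, db and dg are the derivatives of that expression with respect
-- to t, m, v, beta and gamma.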
batchNormWithGlobalNormalizationGrad' op'options scale_after_normalization variance_epsilon t m v gamma backprop | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs t, buildInputs m, buildInputs v, buildInputs gamma, buildInputs backprop] return (opDef "BatchNormWithGlobalNormalizationGrad" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "scale_after_normalization" .~ scale_after_normalization & opAttr "variance_epsilon" .~ variance_epsilon & op'options & opInputs .~ op'inputs) {- input_arg { name: "t" description: "A 4D input Tensor." type_attr: "T" } input_arg { name: "m" description: "A 1D mean Tensor with size matching the last dimension of t.\nThis is the first output from tf.nn.moments,\nor a saved moving average thereof." type_attr: "T" } input_arg { name: "v" description: "A 1D variance Tensor with size matching the last dimension of t.\nThis is the second output from tf.nn.moments,\nor a saved moving average thereof." type_attr: "T" } input_arg { name: "gamma" description: "A 1D gamma Tensor with size matching the last dimension of t.\nIf \"scale_after_normalization\" is true, this Tensor will be multiplied\nwith the normalized Tensor." type_attr: "T" } input_arg { name: "backprop" description: "4D backprop Tensor." type_attr: "T" } output_arg { name: "dx" description: "4D backprop tensor for input." type_attr: "T" } output_arg { name: "dm" description: "1D backprop tensor for mean." type_attr: "T" } output_arg { name: "dv" description: "1D backprop tensor for variance." type_attr: "T" } output_arg { name: "db" description: "1D backprop tensor for beta." type_attr: "T" } output_arg { name: "dg" description: "1D backprop tensor for gamma." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "variance_epsilon" type: "float" description: "A small float number to avoid dividing by 0." } attr { name: "scale_after_normalization" type: "bool" description: "A bool indicating whether the resulted tensor\nneeds to be multiplied with gamma." } -} -- | batchSelfAdjointEig :: forall v'1 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ batchSelfAdjointEig = batchSelfAdjointEig' id batchSelfAdjointEig' :: forall v'1 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ batchSelfAdjointEig' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchSelfAdjointEig" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } } } -} -- | batchSelfAdjointEigV2 :: forall v'1 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __input__ -> (Tensor Build t, Tensor Build t) -- ^ (__e__, __v__) -- -- * __e__ -- -- * __v__ batchSelfAdjointEigV2 = batchSelfAdjointEigV2' id batchSelfAdjointEigV2' :: forall v'1 t . 
(OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> (Tensor Build t, Tensor Build t) -- ^ (__e__, __v__) -- -- * __e__ -- -- * __v__ batchSelfAdjointEigV2' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchSelfAdjointEigV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "e" type_attr: "T" } output_arg { name: "v" type_attr: "T" } attr { name: "compute_v" type: "bool" default_value { b: true } } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } } } -} -- | batchSvd :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Double, Float] t) => Tensor v'1 t -- ^ __input__ -> (Tensor Build t, Tensor Build t, Tensor Build t) -- ^ (__s__, __u__, __v__) -- -- * __s__ -- -- * __u__ -- -- * __v__ batchSvd = batchSvd' id batchSvd' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> (Tensor Build t, Tensor Build t, Tensor Build t) -- ^ (__s__, __u__, __v__) -- -- * __s__ -- -- * __u__ -- -- * __v__ batchSvd' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchSvd" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "s" type_attr: "T" } output_arg { name: "u" type_attr: "T" } output_arg { name: "v" type_attr: "T" } attr { name: "compute_uv" type: "bool" default_value { b: true } } attr { name: "full_matrices" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | BatchToSpace for 4-D tensors of type T. -- -- This is a legacy version of the more general BatchToSpaceND. -- -- Rearranges (permutes) data from batch into blocks of spatial data, followed by -- cropping. This is the reverse transformation of SpaceToBatch. More specifically, -- this op outputs a copy of the input tensor where values from the `batch` -- dimension are moved in spatial blocks to the `height` and `width` dimensions, -- followed by cropping along the `height` and `width` dimensions. batchToSpace :: forall v'1 v'2 t tidx . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Data.Int.Int64 -- ^ __block_size__ -> Tensor v'1 t -- ^ __input__: 4-D tensor with shape -- `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size, -- depth]`. Note that the batch size of the input tensor must be divisible by -- `block_size * block_size`. -> Tensor v'2 tidx -- ^ __crops__: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies -- how many elements to crop from the intermediate result across the spatial -- dimensions as follows: -- -- crops = [[crop_top, crop_bottom], [crop_left, crop_right]] -> Tensor Build t -- ^ __output__: 4-D with shape `[batch, height, width, depth]`, where: -- -- height = height_pad - crop_top - crop_bottom -- width = width_pad - crop_left - crop_right -- -- The attr `block_size` must be greater than one. It indicates the block size. 
-- -- Some examples: -- -- (1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2: -- -- ```prettyprint -- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] -- ``` -- -- The output tensor has shape `[1, 2, 2, 1]` and value: -- -- ```prettyprint -- x = [[[[1], [2]], [[3], [4]]]] -- ``` -- -- (2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2: -- -- ```prettyprint -- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] -- ``` -- -- The output tensor has shape `[1, 2, 2, 3]` and value: -- -- ```prettyprint -- x = [[[[1, 2, 3], [4, 5, 6]], -- [[7, 8, 9], [10, 11, 12]]]] -- ``` -- -- (3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2: -- -- ```prettyprint -- x = [[[[1], [3]], [[9], [11]]], -- [[[2], [4]], [[10], [12]]], -- [[[5], [7]], [[13], [15]]], -- [[[6], [8]], [[14], [16]]]] -- ``` -- -- The output tensor has shape `[1, 4, 4, 1]` and value: -- -- ```prettyprint -- x = [[[[1], [2], [3], [4]], -- [[5], [6], [7], [8]], -- [[9], [10], [11], [12]], -- [[13], [14], [15], [16]]]] -- ``` -- -- (4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2: -- -- ```prettyprint -- x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], -- [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] -- ``` -- -- The output tensor has shape `[2, 2, 4, 1]` and value: -- -- ```prettyprint -- x = [[[[1], [2], [3], [4]], -- [[5], [6], [7], [8]]], -- [[[9], [10], [11], [12]], -- [[13], [14], [15], [16]]]] -- ``` batchToSpace = batchToSpace' id batchToSpace' :: forall v'1 v'2 t tidx . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Data.Int.Int64 -- ^ __block_size__ -> Tensor v'1 t -- ^ __input__: 4-D tensor with shape -- `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size, -- depth]`. Note that the batch size of the input tensor must be divisible by -- `block_size * block_size`. -> Tensor v'2 tidx -- ^ __crops__: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies -- how many elements to crop from the intermediate result across the spatial -- dimensions as follows: -- -- crops = [[crop_top, crop_bottom], [crop_left, crop_right]] -> Tensor Build t -- ^ __output__: 4-D with shape `[batch, height, width, depth]`, where: -- -- height = height_pad - crop_top - crop_bottom -- width = width_pad - crop_left - crop_right -- -- The attr `block_size` must be greater than one. It indicates the block size.
-- -- Some examples: -- -- (1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2: -- -- ```prettyprint -- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] -- ``` -- -- The output tensor has shape `[1, 2, 2, 1]` and value: -- -- ```prettyprint -- x = [[[[1], [2]], [[3], [4]]]] -- ``` -- -- (2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2: -- -- ```prettyprint -- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] -- ``` -- -- The output tensor has shape `[1, 2, 2, 3]` and value: -- -- ```prettyprint -- x = [[[[1, 2, 3], [4, 5, 6]], -- [[7, 8, 9], [10, 11, 12]]]] -- ``` -- -- (3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2: -- -- ```prettyprint -- x = [[[[1], [3]], [[9], [11]]], -- [[[2], [4]], [[10], [12]]], -- [[[5], [7]], [[13], [15]]], -- [[[6], [8]], [[14], [16]]]] -- ``` -- -- The output tensor has shape `[1, 4, 4, 1]` and value: -- -- ```prettyprint -- x = [[[[1], [2], [3], [4]], -- [[5], [6], [7], [8]], -- [[9], [10], [11], [12]], -- [[13], [14], [15], [16]]]] -- ``` -- -- (4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2: -- -- ```prettyprint -- x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], -- [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] -- ``` -- -- The output tensor has shape `[2, 2, 4, 1]` and value: -- -- ```prettyprint -- x = [[[[1], [2], [3], [4]], -- [[5], [6], [7], [8]]], -- [[[9], [10], [11], [12]], -- [[13], [14], [15], [16]]]] -- ``` batchToSpace' op'options block_size input crops | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs crops] return (opDef "BatchToSpace" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & opAttr "block_size" .~ block_size & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "4-D tensor with shape\n`[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,\n depth]`. Note that the batch size of the input tensor must be divisible by\n`block_size * block_size`." type_attr: "T" } input_arg { name: "crops" description: "2-D tensor of non-negative integers with shape `[2, 2]`. It specifies\nhow many elements to crop from the intermediate result across the spatial\ndimensions as follows:\n\n crops = [[crop_top, crop_bottom], [crop_left, crop_right]]" type_attr: "Tidx" } output_arg { name: "output" description: "4-D with shape `[batch, height, width, depth]`, where:\n\n height = height_pad - crop_top - crop_bottom\n width = width_pad - crop_left - crop_right\n\nThe attr `block_size` must be greater than one.
It indicates the block size.\n\nSome examples:\n\n(1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:\n\n```prettyprint\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 1]` and value:\n\n```prettyprint\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\n(2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:\n\n```prettyprint\n[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 3]` and value:\n\n```prettyprint\nx = [[[[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]]]]\n```\n\n(3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:\n\n```prettyprint\nx = [[[[1], [3]], [[9], [11]]],\n [[[2], [4]], [[10], [12]]],\n [[[5], [7]], [[13], [15]]],\n [[[6], [8]], [[14], [16]]]]\n```\n\nThe output tensor has shape `[1, 4, 4, 1]` and value:\n\n```prettyprint\nx = [[[1], [2], [3], [4]],\n [[5], [6], [7], [8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]\n```\n\n(4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:\n\n```prettyprint\nx = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],\n [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]\n```\n\nThe output tensor has shape `[2, 2, 4, 1]` and value:\n\n```prettyprint\nx = [[[[1], [3]], [[5], [7]]],\n [[[2], [4]], [[10], [12]]],\n [[[5], [7]], [[13], [15]]],\n [[[6], [8]], [[14], [16]]]]\n```" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "block_size" type: "int" has_minimum: true minimum: 2 } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | BatchToSpace for N-D tensors of type T. -- -- This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape -- `block_shape + [batch]`, interleaves these blocks back into the grid defined by -- the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as -- the input. The spatial dimensions of this intermediate result are then -- optionally cropped according to `crops` to produce the output. This is the -- reverse of SpaceToBatch. See below for a precise description. batchToSpaceND :: forall v'1 v'2 v'3 t tblock_shape tcrops . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tblock_shape, OneOf '[Data.Int.Int32, Data.Int.Int64] tcrops) => Tensor v'1 t -- ^ __input__: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, -- where spatial_shape has M dimensions. -> Tensor v'2 tblock_shape -- ^ __block_shape__: 1-D with shape `[M]`, all values must be >= 1. -> Tensor v'3 tcrops -- ^ __crops__: 2-D with shape `[M, 2]`, all values must be >= 0. -- `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input -- dimension `i + 1`, which corresponds to spatial dimension `i`. It is -- required that -- `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`. -- -- This operation is equivalent to the following steps: -- -- 1. Reshape `input` to `reshaped` of shape: -- [block_shape[0], ..., block_shape[M-1], -- batch / prod(block_shape), -- input_shape[1], ..., input_shape[N-1]] -- -- 2. Permute dimensions of `reshaped` to produce `permuted` of shape -- [batch / prod(block_shape), -- -- input_shape[1], block_shape[0], -- ..., -- input_shape[M], block_shape[M-1], -- -- input_shape[M+1], ..., input_shape[N-1]] -- -- 3. 
Reshape `permuted` to produce `reshaped_permuted` of shape -- [batch / prod(block_shape), -- -- input_shape[1] * block_shape[0], -- ..., -- input_shape[M] * block_shape[M-1], -- -- input_shape[M+1], -- ..., -- input_shape[N-1]] -- -- 4. Crop the start and end of dimensions `[1, ..., M]` of -- `reshaped_permuted` according to `crops` to produce the output of shape: -- [batch / prod(block_shape), -- -- input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], -- ..., -- input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1], -- -- input_shape[M+1], ..., input_shape[N-1]] -- -- Some examples: -- -- (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and -- `crops = [[0, 0], [0, 0]]`: -- -- ```prettyprint -- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] -- ``` -- -- The output tensor has shape `[1, 2, 2, 1]` and value: -- -- ```prettyprint -- x = [[[[1], [2]], [[3], [4]]]] -- ``` -- -- (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and -- `crops = [[0, 0], [0, 0]]`: -- -- ```prettyprint -- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] -- ``` -- -- The output tensor has shape `[1, 2, 2, 3]` and value: -- -- ```prettyprint -- x = [[[[1, 2, 3], [4, 5, 6]], -- [[7, 8, 9], [10, 11, 12]]]] -- ``` -- -- (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and -- `crops = [[0, 0], [0, 0]]`: -- -- ```prettyprint -- x = [[[[1], [3]], [[9], [11]]], -- [[[2], [4]], [[10], [12]]], -- [[[5], [7]], [[13], [15]]], -- [[[6], [8]], [[14], [16]]]] -- ``` -- -- The output tensor has shape `[1, 4, 4, 1]` and value: -- -- ```prettyprint -- x = [[[[1], [2], [3], [4]], -- [[5], [6], [7], [8]], -- [[9], [10], [11], [12]], -- [[13], [14], [15], [16]]]] -- ``` -- -- (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and -- `crops = [[0, 0], [2, 0]]`: -- -- ```prettyprint -- x = [[[[0], [1], [3]]], [[[0], [9], [11]]], -- [[[0], [2], [4]]], [[[0], [10], [12]]], -- [[[0], [5], [7]]], [[[0], [13], [15]]], -- [[[0], [6], [8]]], [[[0], [14], [16]]]] -- ``` -- -- The output tensor has shape `[2, 2, 4, 1]` and value: -- -- ```prettyprint -- x = [[[[1], [2], [3], [4]], -- [[5], [6], [7], [8]]], -- [[[9], [10], [11], [12]], -- [[13], [14], [15], [16]]]] -- ``` -> Tensor Build t -- ^ __output__ batchToSpaceND = batchToSpaceND' id batchToSpaceND' :: forall v'1 v'2 v'3 t tblock_shape tcrops . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tblock_shape, OneOf '[Data.Int.Int32, Data.Int.Int64] tcrops) => OpParams -> Tensor v'1 t -- ^ __input__: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, -- where spatial_shape has M dimensions. -> Tensor v'2 tblock_shape -- ^ __block_shape__: 1-D with shape `[M]`, all values must be >= 1. -> Tensor v'3 tcrops -- ^ __crops__: 2-D with shape `[M, 2]`, all values must be >= 0. -- `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input -- dimension `i + 1`, which corresponds to spatial dimension `i`. It is -- required that -- `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`. -- -- This operation is equivalent to the following steps: -- -- 1. Reshape `input` to `reshaped` of shape: -- [block_shape[0], ..., block_shape[M-1], -- batch / prod(block_shape), -- input_shape[1], ..., input_shape[N-1]] -- -- 2.
Permute dimensions of `reshaped` to produce `permuted` of shape -- [batch / prod(block_shape), -- -- input_shape[1], block_shape[0], -- ..., -- input_shape[M], block_shape[M-1], -- -- input_shape[M+1], ..., input_shape[N-1]] -- -- 3. Reshape `permuted` to produce `reshaped_permuted` of shape -- [batch / prod(block_shape), -- -- input_shape[1] * block_shape[0], -- ..., -- input_shape[M] * block_shape[M-1], -- -- input_shape[M+1], -- ..., -- input_shape[N-1]] -- -- 4. Crop the start and end of dimensions `[1, ..., M]` of -- `reshaped_permuted` according to `crops` to produce the output of shape: -- [batch / prod(block_shape), -- -- input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], -- ..., -- input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1], -- -- input_shape[M+1], ..., input_shape[N-1]] -- -- Some examples: -- -- (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and -- `crops = [[0, 0], [0, 0]]`: -- -- ```prettyprint -- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] -- ``` -- -- The output tensor has shape `[1, 2, 2, 1]` and value: -- -- ```prettyprint -- x = [[[[1], [2]], [[3], [4]]]] -- ``` -- -- (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and -- `crops = [[0, 0], [0, 0]]`: -- -- ```prettyprint -- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] -- ``` -- -- The output tensor has shape `[1, 2, 2, 3]` and value: -- -- ```prettyprint -- x = [[[[1, 2, 3], [4, 5, 6]], -- [[7, 8, 9], [10, 11, 12]]]] -- ``` -- -- (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and -- `crops = [[0, 0], [0, 0]]`: -- -- ```prettyprint -- x = [[[[1], [3]], [[9], [11]]], -- [[[2], [4]], [[10], [12]]], -- [[[5], [7]], [[13], [15]]], -- [[[6], [8]], [[14], [16]]]] -- ``` -- -- The output tensor has shape `[1, 4, 4, 1]` and value: -- -- ```prettyprint -- x = [[[[1], [2], [3], [4]], -- [[5], [6], [7], [8]], -- [[9], [10], [11], [12]], -- [[13], [14], [15], [16]]]] -- ``` -- -- (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and -- `crops = [[0, 0], [2, 0]]`: -- -- ```prettyprint -- x = [[[[0], [1], [3]]], [[[0], [9], [11]]], -- [[[0], [2], [4]]], [[[0], [10], [12]]], -- [[[0], [5], [7]]], [[[0], [13], [15]]], -- [[[0], [6], [8]]], [[[0], [14], [16]]]] -- ``` -- -- The output tensor has shape `[2, 2, 4, 1]` and value: -- -- ```prettyprint -- x = [[[[1], [2], [3], [4]], -- [[5], [6], [7], [8]]], -- [[[9], [10], [11], [12]], -- [[13], [14], [15], [16]]]] -- ``` -> Tensor Build t -- ^ __output__ batchToSpaceND' op'options input block_shape crops | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs block_shape, buildInputs crops] return (opDef "BatchToSpaceND" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tblock_shape" .~ tensorType (undefined :: tblock_shape) & opAttr "Tcrops" .~ tensorType (undefined :: tcrops) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,\nwhere spatial_shape has M dimensions." type_attr: "T" } input_arg { name: "block_shape" description: "1-D with shape `[M]`, all values must be >= 1." type_attr: "Tblock_shape" } input_arg { name: "crops" description: "2-D with shape `[M, 2]`, all values must be >= 0.\n `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input\n dimension `i + 1`, which corresponds to spatial dimension `i`.
It is\n required that\n `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.\n\nThis operation is equivalent to the following steps:\n\n1. Reshape `input` to `reshaped` of shape:\n [block_shape[0], ..., block_shape[M-1],\n batch / prod(block_shape),\n input_shape[1], ..., input_shape[N-1]]\n\n2. Permute dimensions of `reshaped` to produce `permuted` of shape\n [batch / prod(block_shape),\n\n input_shape[1], block_shape[0],\n ...,\n input_shape[M], block_shape[M-1],\n\n input_shape[M+1], ..., input_shape[N-1]]\n\n3. Reshape `permuted` to produce `reshaped_permuted` of shape\n [batch / prod(block_shape),\n\n input_shape[1] * block_shape[0],\n ...,\n input_shape[M] * block_shape[M-1],\n\n input_shape[M+1],\n ...,\n input_shape[N-1]]\n\n4. Crop the start and end of dimensions `[1, ..., M]` of\n `reshaped_permuted` according to `crops` to produce the output of shape:\n [batch / prod(block_shape),\n\n input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],\n ...,\n input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],\n\n input_shape[M+1], ..., input_shape[N-1]]\n\nSome examples:\n\n(1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and\n `crops = [[0, 0], [0, 0]]`:\n\n```prettyprint\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 1]` and value:\n\n```prettyprint\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\n(2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and\n `crops = [[0, 0], [0, 0]]`:\n\n```prettyprint\n[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 3]` and value:\n\n```prettyprint\nx = [[[[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]]]]\n```\n\n(3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and\n `crops = [[0, 0], [0, 0]]`:\n\n```prettyprint\nx = [[[[1], [3]], [[9], [11]]],\n [[[2], [4]], [[10], [12]]],\n [[[5], [7]], [[13], [15]]],\n [[[6], [8]], [[14], [16]]]]\n```\n\nThe output tensor has shape `[1, 4, 4, 1]` and value:\n\n```prettyprint\nx = [[[1], [2], [3], [4]],\n [[5], [6], [7], [8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]\n```\n\n(4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and\n `crops = [[0, 0], [2, 0]]`:\n\n```prettyprint\nx = [[[[0], [1], [3]]], [[[0], [9], [11]]],\n [[[0], [2], [4]]], [[[0], [10], [12]]],\n [[[0], [5], [7]]], [[[0], [13], [15]]],\n [[[0], [6], [8]]], [[[0], [14], [16]]]]\n```\n\nThe output tensor has shape `[2, 2, 4, 1]` and value:\n\n```prettyprint\nx = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]]],\n [[[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n```" type_attr: "Tcrops" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tblock_shape" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "Tcrops" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Compute the regularized incomplete beta integral \\(I_x(a, b)\\). -- -- The regularized incomplete beta integral is defined as: -- -- ``` -- I_x(a, b) = \frac{B(x; a, b)}{B(a, b)} -- ``` -- where -- -- ``` -- B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt -- ``` -- -- is the incomplete beta function and \\(B(a, b)\\) is the *complete* -- beta function. betainc :: forall v'1 v'2 v'3 t . 
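-- A quick sanity check of the definition above: for a = b = 1 the integrand
-- is constant, so B(x; 1, 1) = x and B(1, 1) = 1, hence I_x(1, 1) = x. A
-- hedged call sketch (`a`, `b`, and `x` are placeholder tensors of matching
-- shape):
--
-- > z = betainc a b x   -- elementwise I_x(a, b)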
(OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __b__ -> Tensor v'3 t -- ^ __x__ -> Tensor Build t -- ^ __z__ betainc = betainc' id betainc' :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __b__ -> Tensor v'3 t -- ^ __x__ -> Tensor Build t -- ^ __z__ betainc' op'options a b x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a, buildInputs b, buildInputs x] return (opDef "Betainc" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a" type_attr: "T" } input_arg { name: "b" type_attr: "T" } input_arg { name: "x" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Adds `bias` to `value`. -- -- This is a special case of `tf.add` where `bias` is restricted to be 1-D. -- Broadcasting is supported, so `value` may have any number of dimensions. biasAdd :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __value__: Any number of dimensions. -> Tensor v'2 t -- ^ __bias__: 1-D with size the last dimension of `value`. -> Tensor Build t -- ^ __output__: Broadcasted sum of `value` and `bias`. biasAdd = biasAdd' id biasAdd' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __value__: Any number of dimensions. -> Tensor v'2 t -- ^ __bias__: 1-D with size the last dimension of `value`. -> Tensor Build t -- ^ __output__: Broadcasted sum of `value` and `bias`. biasAdd' op'options value bias | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value, buildInputs bias] return (opDef "BiasAdd" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "value" description: "Any number of dimensions." type_attr: "T" } input_arg { name: "bias" description: "1-D with size the last dimension of `value`." type_attr: "T" } output_arg { name: "output" description: "Broadcasted sum of `value` and `bias`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the bias tensor will be added to the last dimension\nof the value tensor.\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width].\nThe tensor will be added to \"in_channels\", the third-to-the-last\n dimension." allowed_values { list { s: "NHWC" s: "NCHW" } } } -} -- | The backward operation for "BiasAdd" on the "bias" tensor. -- -- It accumulates all the values from out_backprop into the feature dimension. -- For NHWC data format, the feature dimension is the last. For NCHW data format, -- the feature dimension is the third-to-last. 
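-- A hedged sketch pairing biasAdd with its backward op (NHWC layout; `value`
-- has shape [..., c], `bias` has shape [c]; the names are placeholders):
--
-- > out   = biasAdd value bias        -- broadcasts bias over the last axis
-- > dBias = biasAddGrad outBackprop   -- reduces [..., c] back down to [c]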
biasAddGrad :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __out_backprop__: Any number of dimensions. -> Tensor Build t -- ^ __output__: 1-D with size the feature dimension of `out_backprop`. biasAddGrad = biasAddGrad' id biasAddGrad' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __out_backprop__: Any number of dimensions. -> Tensor Build t -- ^ __output__: 1-D with size the feature dimension of `out_backprop`. biasAddGrad' op'options out_backprop | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs out_backprop] return (opDef "BiasAddGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "out_backprop" description: "Any number of dimensions." type_attr: "T" } output_arg { name: "output" description: "1-D with size the feature dimension of `out_backprop`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the bias tensor will be added to the last dimension\nof the value tensor.\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width].\nThe tensor will be added to \"in_channels\", the third-to-the-last\n dimension." allowed_values { list { s: "NHWC" s: "NCHW" } } } -} -- | Adds `bias` to `value`. -- -- This is a deprecated version of BiasAdd and will soon be removed. -- -- This is a special case of `tf.add` where `bias` is restricted to be 1-D. -- Broadcasting is supported, so `value` may have any number of dimensions. biasAddV1 :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __value__: Any number of dimensions. -> Tensor v'2 t -- ^ __bias__: 1-D with size the last dimension of `value`. -> Tensor Build t -- ^ __output__: Broadcasted sum of `value` and `bias`. biasAddV1 = biasAddV1' id biasAddV1' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __value__: Any number of dimensions. -> Tensor v'2 t -- ^ __bias__: 1-D with size the last dimension of `value`. -> Tensor Build t -- ^ __output__: Broadcasted sum of `value` and `bias`. biasAddV1' op'options value bias | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value, buildInputs bias] return (opDef "BiasAddV1" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "value" description: "Any number of dimensions."
type_attr: "T" } input_arg { name: "bias" description: "1-D with size the last dimension of `value`." type_attr: "T" } output_arg { name: "output" description: "Broadcasted sum of `value` and `bias`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } -} -- | Counts the number of occurrences of each value in an integer array. -- -- Outputs a vector with length `size` and the same dtype as `weights`. If -- `weights` are empty, then index `i` stores the number of times the value `i` is -- counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of -- the value in `weights` at each index where the corresponding value in `arr` is -- `i`. -- -- Values in `arr` outside of the range [0, size) are ignored. bincount :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => Tensor v'1 Data.Int.Int32 -- ^ __arr__: int32 `Tensor`. -> Tensor v'2 Data.Int.Int32 -- ^ __size__: non-negative int32 scalar `Tensor`. -> Tensor v'3 t -- ^ __weights__: is an int32, int64, float32, or float64 `Tensor` with the same -- shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights -- equal to 1. -> Tensor Build t -- ^ __bins__: 1D `Tensor` with length equal to `size`. The counts or summed weights for -- each value in the range [0, size). bincount = bincount' id bincount' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __arr__: int32 `Tensor`. -> Tensor v'2 Data.Int.Int32 -- ^ __size__: non-negative int32 scalar `Tensor`. -> Tensor v'3 t -- ^ __weights__: is an int32, int64, float32, or float64 `Tensor` with the same -- shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights -- equal to 1. -> Tensor Build t -- ^ __bins__: 1D `Tensor` with length equal to `size`. The counts or summed weights for -- each value in the range [0, size). bincount' op'options arr size weights | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs arr, buildInputs size, buildInputs weights] return (opDef "Bincount" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "arr" description: "int32 `Tensor`." type: DT_INT32 } input_arg { name: "size" description: "non-negative int32 scalar `Tensor`." type: DT_INT32 } input_arg { name: "weights" description: "is an int32, int64, float32, or float64 `Tensor` with the same\nshape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights\nequal to 1." type_attr: "T" } output_arg { name: "bins" description: "1D `Tensor` with length equal to `size`. The counts or summed weights for\neach value in the range [0, size)." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Bitcasts a tensor from one type to another without copying data. -- -- Given a tensor `input`, this operation returns a tensor that has the same buffer -- data as `input` with datatype `type`. -- -- If the input datatype `T` is larger than the output datatype `type` then the -- shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)]. 
-- -- If `T` is smaller than `type`, the operator requires that the rightmost -- dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from -- [..., sizeof(`type`)/sizeof(`T`)] to [...]. -- -- *NOTE*: Bitcast is implemented as a low-level cast, so machines with different -- endian orderings will give different results. bitcast :: forall v'1 t type' . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] type') => Tensor v'1 t -- ^ __input__ -> Tensor Build type' -- ^ __output__ bitcast = bitcast' id bitcast' :: forall v'1 t type' . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] type') => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build type' -- ^ __output__ bitcast' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Bitcast" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "type" .~ tensorType (undefined :: type') & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "type" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "type" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } -} -- | Return the shape of s0 op s1 with broadcast. -- -- Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the -- broadcasted shape. `s0`, `s1` and `r0` are all integer vectors. broadcastArgs :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Tensor v'1 t -- ^ __s0__ -> Tensor v'2 t -- ^ __s1__ -> Tensor Build t -- ^ __r0__ broadcastArgs = broadcastArgs' id broadcastArgs' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Tensor v'1 t -- ^ __s0__ -> Tensor v'2 t -- ^ __s1__ -> Tensor Build t -- ^ __r0__ broadcastArgs' op'options s0 s1 | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs s0, buildInputs s1] return (opDef "BroadcastArgs" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "s0" type_attr: "T" } input_arg { name: "s1" type_attr: "T" } output_arg { name: "r0" type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Return the reduction indices for computing gradients of s0 op s1 with broadcast. -- -- This is typically used by gradient computations for a broadcasting operation. 
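--
-- A small sketch (illustrative; assumes @constant@ from TensorFlow.Ops and
-- @runSession@/@run@ from TensorFlow.Session). For shapes @[4, 1]@ and
-- @[1, 3]@ the broadcast result is @[4, 3]@, so the gradient for the first
-- input is reduced over axis 1 and the gradient for the second over axis 0:
--
-- > let s0 = constant (Shape [2]) [4, 1 :: Data.Int.Int32]
-- >     s1 = constant (Shape [2]) [1, 3 :: Data.Int.Int32]
-- >     (r0, r1) = broadcastGradientArgs s0 s1
-- > -- run (r0, r1) is expected to yield ([1], [0])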
broadcastGradientArgs :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Tensor v'1 t -- ^ __s0__ -> Tensor v'2 t -- ^ __s1__ -> (Tensor Build t, Tensor Build t) -- ^ (__r0__, __r1__) -- -- * __r0__ -- -- * __r1__ broadcastGradientArgs = broadcastGradientArgs' id broadcastGradientArgs' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Tensor v'1 t -- ^ __s0__ -> Tensor v'2 t -- ^ __s1__ -> (Tensor Build t, Tensor Build t) -- ^ (__r0__, __r1__) -- -- * __r0__ -- -- * __r1__ broadcastGradientArgs' op'options s0 s1 | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs s0, buildInputs s1] return (opDef "BroadcastGradientArgs" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "s0" type_attr: "T" } input_arg { name: "s1" type_attr: "T" } output_arg { name: "r0" type_attr: "T" } output_arg { name: "r1" type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Performs beam search decoding on the logits given in input. -- -- A note about the attribute merge_repeated: For the beam search decoder, -- this means that if consecutive entries in a beam are the same, only -- the first of these is emitted. That is, when the top path is "A B B B B", -- "A B" is returned if merge_repeated = True but "A B B B B" is -- returned if merge_repeated = False. cTCBeamSearchDecoder :: Data.Int.Int64 -- ^ __beam_width__: A scalar >= 0 (beam search beam width). -> Data.Int.Int64 -- ^ __top_paths__: A scalar >= 0, <= beam_width (controls output size). -> Tensor v'1 Float -- ^ __inputs__: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. -> Tensor v'2 Data.Int.Int32 -- ^ __sequence_length__: A vector containing sequence lengths, size `(batch)`. -> ([Tensor Build Data.Int.Int64], [Tensor Build Data.Int.Int64], [Tensor Build Data.Int.Int64], Tensor Build Float) -- ^ (__decoded_indices__, __decoded_values__, __decoded_shape__, __log_probability__) -- -- * __decoded_indices__: A list (length: top_paths) of indices matrices. Matrix j, -- size `(total_decoded_outputs[j] x 2)`, has indices of a -- `SparseTensor`. The rows store: [batch, time]. -- -- * __decoded_values__: A list (length: top_paths) of values vectors. Vector j, -- size `(length total_decoded_outputs[j])`, has the values of a -- `SparseTensor`. The vector stores the decoded classes for beam j. -- -- * __decoded_shape__: A list (length: top_paths) of shape vector. Vector j, -- size `(2)`, stores the shape of the decoded `SparseTensor[j]`. -- Its values are: `[batch_size, max_decoded_length[j]]`. -- -- * __log_probability__: A matrix, shaped: `(batch_size x top_paths)`. The -- sequence log-probabilities. cTCBeamSearchDecoder = cTCBeamSearchDecoder' id cTCBeamSearchDecoder' :: OpParams -> Data.Int.Int64 -- ^ __beam_width__: A scalar >= 0 (beam search beam width). -> Data.Int.Int64 -- ^ __top_paths__: A scalar >= 0, <= beam_width (controls output size). -> Tensor v'1 Float -- ^ __inputs__: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. -> Tensor v'2 Data.Int.Int32 -- ^ __sequence_length__: A vector containing sequence lengths, size `(batch)`. 
-> ([Tensor Build Data.Int.Int64], [Tensor Build Data.Int.Int64], [Tensor Build Data.Int.Int64], Tensor Build Float) -- ^ (__decoded_indices__, __decoded_values__, __decoded_shape__, __log_probability__) -- -- * __decoded_indices__: A list (length: top_paths) of indices matrices. Matrix j, -- size `(total_decoded_outputs[j] x 2)`, has indices of a -- `SparseTensor`. The rows store: [batch, time]. -- -- * __decoded_values__: A list (length: top_paths) of values vectors. Vector j, -- size `(length total_decoded_outputs[j])`, has the values of a -- `SparseTensor`. The vector stores the decoded classes for beam j. -- -- * __decoded_shape__: A list (length: top_paths) of shape vector. Vector j, -- size `(2)`, stores the shape of the decoded `SparseTensor[j]`. -- Its values are: `[batch_size, max_decoded_length[j]]`. -- -- * __log_probability__: A matrix, shaped: `(batch_size x top_paths)`. The -- sequence log-probabilities. cTCBeamSearchDecoder' op'options beam_width top_paths inputs sequence_length | eqLengthGuard [] = pureOp [top_paths, top_paths, top_paths] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs, buildInputs sequence_length] return (opDef "CTCBeamSearchDecoder" & opAttr "beam_width" .~ beam_width & opAttr "top_paths" .~ top_paths & op'options & opInputs .~ op'inputs) {- input_arg { name: "inputs" description: "3-D, shape: `(max_time x batch_size x num_classes)`, the logits." type: DT_FLOAT } input_arg { name: "sequence_length" description: "A vector containing sequence lengths, size `(batch)`." type: DT_INT32 } output_arg { name: "decoded_indices" description: "A list (length: top_paths) of indices matrices. Matrix j,\nsize `(total_decoded_outputs[j] x 2)`, has indices of a\n`SparseTensor`. The rows store: [batch, time]." type: DT_INT64 number_attr: "top_paths" } output_arg { name: "decoded_values" description: "A list (length: top_paths) of values vectors. Vector j,\nsize `(length total_decoded_outputs[j])`, has the values of a\n`SparseTensor`. The vector stores the decoded classes for beam j." type: DT_INT64 number_attr: "top_paths" } output_arg { name: "decoded_shape" description: "A list (length: top_paths) of shape vector. Vector j,\nsize `(2)`, stores the shape of the decoded `SparseTensor[j]`.\nIts values are: `[batch_size, max_decoded_length[j]]`." type: DT_INT64 number_attr: "top_paths" } output_arg { name: "log_probability" description: "A matrix, shaped: `(batch_size x top_paths)`. The\nsequence log-probabilities." type: DT_FLOAT } attr { name: "beam_width" type: "int" description: "A scalar >= 0 (beam search beam width)." has_minimum: true minimum: 1 } attr { name: "top_paths" type: "int" description: "A scalar >= 0, <= beam_width (controls output size)." has_minimum: true minimum: 1 } attr { name: "merge_repeated" type: "bool" default_value { b: true } description: "If true, merge repeated classes in output." } -} -- | Performs greedy decoding on the logits given in inputs. -- -- A note about the attribute merge_repeated: if enabled, when -- consecutive logits' maximum indices are the same, only the first of -- these is emitted. Labeling the blank '*', the sequence "A B B * B B" -- becomes "A B B" if merge_repeated = True and "A B B B B" if -- merge_repeated = False. -- -- Regardless of the value of merge_repeated, if the maximum index of a given -- time and batch corresponds to the blank, index `(num_classes - 1)`, no new -- element is emitted. 
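--
-- An illustrative sketch (values are hypothetical; assumes @constant@ and
-- @runSession@/@run@ from companion modules in this package). With
-- @num_classes = 3@ the blank is index 2; below, the argmax path over two
-- timesteps is class 1 then class 0, so the decoded sequence for the single
-- batch entry is @[1, 0]@:
--
-- > let inputs = constant (Shape [2, 1, 3])
-- >                       [ 0.1, 0.8, 0.1          -- t = 0: argmax is class 1
-- >                       , 0.7, 0.2, 0.1 :: Float ]  -- t = 1: argmax is class 0
-- >     seqLen = constant (Shape [1]) [2 :: Data.Int.Int32]
-- >     (_indices, values, _shape, _logProb) = cTCGreedyDecoder inputs seqLen
-- > -- run values is expected to yield [1, 0]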
cTCGreedyDecoder :: Tensor v'1 Float -- ^ __inputs__: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. -> Tensor v'2 Data.Int.Int32 -- ^ __sequence_length__: A vector containing sequence lengths, size `(batch_size)`. -> (Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int64, Tensor Build Float) -- ^ (__decoded_indices__, __decoded_values__, __decoded_shape__, __log_probability__) -- -- * __decoded_indices__: Indices matrix, size `(total_decoded_outputs x 2)`, -- of a `SparseTensor`. The rows store: [batch, time]. -- -- * __decoded_values__: Values vector, size: `(total_decoded_outputs)`, -- of a `SparseTensor`. The vector stores the decoded classes. -- -- * __decoded_shape__: Shape vector, size `(2)`, of the decoded SparseTensor. -- Values are: `[batch_size, max_decoded_length]`. -- -- * __log_probability__: Matrix, size `(batch_size x 1)`, containing sequence -- log-probabilities. cTCGreedyDecoder = cTCGreedyDecoder' id cTCGreedyDecoder' :: OpParams -> Tensor v'1 Float -- ^ __inputs__: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. -> Tensor v'2 Data.Int.Int32 -- ^ __sequence_length__: A vector containing sequence lengths, size `(batch_size)`. -> (Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int64, Tensor Build Float) -- ^ (__decoded_indices__, __decoded_values__, __decoded_shape__, __log_probability__) -- -- * __decoded_indices__: Indices matrix, size `(total_decoded_outputs x 2)`, -- of a `SparseTensor`. The rows store: [batch, time]. -- -- * __decoded_values__: Values vector, size: `(total_decoded_outputs)`, -- of a `SparseTensor`. The vector stores the decoded classes. -- -- * __decoded_shape__: Shape vector, size `(2)`, of the decoded SparseTensor. -- Values are: `[batch_size, max_decoded_length]`. -- -- * __log_probability__: Matrix, size `(batch_size x 1)`, containing sequence -- log-probabilities. cTCGreedyDecoder' op'options inputs sequence_length | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs, buildInputs sequence_length] return (opDef "CTCGreedyDecoder" & op'options & opInputs .~ op'inputs) {- input_arg { name: "inputs" description: "3-D, shape: `(max_time x batch_size x num_classes)`, the logits." type: DT_FLOAT } input_arg { name: "sequence_length" description: "A vector containing sequence lengths, size `(batch_size)`." type: DT_INT32 } output_arg { name: "decoded_indices" description: "Indices matrix, size `(total_decoded_outputs x 2)`,\nof a `SparseTensor`. The rows store: [batch, time]." type: DT_INT64 } output_arg { name: "decoded_values" description: "Values vector, size: `(total_decoded_outputs)`,\nof a `SparseTensor`. The vector stores the decoded classes." type: DT_INT64 } output_arg { name: "decoded_shape" description: "Shape vector, size `(2)`, of the decoded SparseTensor.\nValues are: `[batch_size, max_decoded_length]`." type: DT_INT64 } output_arg { name: "log_probability" description: "Matrix, size `(batch_size x 1)`, containing sequence\nlog-probabilities." type: DT_FLOAT } attr { name: "merge_repeated" type: "bool" default_value { b: false } description: "If True, merge repeated classes in output." } -} -- | Calculates the CTC Loss (log probability) for each batch entry. Also calculates -- -- the gradient. This class performs the softmax operation for you, so inputs -- should be e.g. linear projections of outputs by an LSTM. 
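--
-- A minimal sketch of wiring up the sparse label inputs (hypothetical values;
-- assumes @constant@ and @runSession@/@run@ from companion modules in this
-- package). One batch entry of length 1 whose single label is class 0, with
-- @num_classes = 2@ (so class 1 is the blank):
--
-- > let inputs  = constant (Shape [1, 1, 2]) [2.0, 0.5 :: Float]   -- (max_time, batch, classes)
-- >     labIdx  = constant (Shape [1, 2]) [0, 0 :: Data.Int.Int64] -- [b, t] pairs
-- >     labVals = constant (Shape [1]) [0 :: Data.Int.Int32]
-- >     seqLen  = constant (Shape [1]) [1 :: Data.Int.Int32]
-- >     (loss, gradient) = cTCLoss inputs labIdx labVals seqLen
-- > -- run loss yields a length-1 vector of negative log-probabilities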
cTCLoss :: Tensor v'1 Float -- ^ __inputs__: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. -> Tensor v'2 Data.Int.Int64 -- ^ __labels_indices__: The indices of a `SparseTensor`. -- `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for -- `(batch b, time t)`. -> Tensor v'3 Data.Int.Int32 -- ^ __labels_values__: The values (labels) associated with the given batch and time. -> Tensor v'4 Data.Int.Int32 -- ^ __sequence_length__: A vector containing sequence lengths (batch). -> (Tensor Build Float, Tensor Build Float) -- ^ (__loss__, __gradient__) -- -- * __loss__: A vector (batch) containing log-probabilities. -- -- * __gradient__: The gradient of `loss`. 3-D, shape: -- `(max_time x batch_size x num_classes)`. cTCLoss = cTCLoss' id cTCLoss' :: OpParams -> Tensor v'1 Float -- ^ __inputs__: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. -> Tensor v'2 Data.Int.Int64 -- ^ __labels_indices__: The indices of a `SparseTensor`. -- `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for -- `(batch b, time t)`. -> Tensor v'3 Data.Int.Int32 -- ^ __labels_values__: The values (labels) associated with the given batch and time. -> Tensor v'4 Data.Int.Int32 -- ^ __sequence_length__: A vector containing sequence lengths (batch). -> (Tensor Build Float, Tensor Build Float) -- ^ (__loss__, __gradient__) -- -- * __loss__: A vector (batch) containing log-probabilities. -- -- * __gradient__: The gradient of `loss`. 3-D, shape: -- `(max_time x batch_size x num_classes)`. cTCLoss' op'options inputs labels_indices labels_values sequence_length | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs, buildInputs labels_indices, buildInputs labels_values, buildInputs sequence_length] return (opDef "CTCLoss" & op'options & opInputs .~ op'inputs) {- input_arg { name: "inputs" description: "3-D, shape: `(max_time x batch_size x num_classes)`, the logits." type: DT_FLOAT } input_arg { name: "labels_indices" description: "The indices of a `SparseTensor`.\n`labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for\n`(batch b, time t)`." type: DT_INT64 } input_arg { name: "labels_values" description: "The values (labels) associated with the given batch and time." type: DT_INT32 } input_arg { name: "sequence_length" description: "A vector containing sequence lengths (batch)." type: DT_INT32 } output_arg { name: "loss" description: "A vector (batch) containing log-probabilities." type: DT_FLOAT } output_arg { name: "gradient" description: "The gradient of `loss`. 3-D, shape:\n`(max_time x batch_size x num_classes)`." type: DT_FLOAT } attr { name: "preprocess_collapse_repeated" type: "bool" default_value { b: false } description: "Scalar, if true then repeated labels are\ncollapsed prior to the CTC calculation." } attr { name: "ctc_merge_repeated" type: "bool" default_value { b: true } description: "Scalar. If set to false, *during* CTC calculation\nrepeated non-blank labels will not be merged and are interpreted as\nindividual labels. This is a simplified version of CTC." } -} -- | Cast x of type SrcT to y of DstT. cast :: forall v'1 srcT dstT . (TensorType srcT, TensorType dstT) => Tensor v'1 srcT -- ^ __x__ -> Tensor Build dstT -- ^ __y__ cast = cast' id cast' :: forall v'1 srcT dstT . 
(TensorType srcT, TensorType dstT) => OpParams -> Tensor v'1 srcT -- ^ __x__ -> Tensor Build dstT -- ^ __y__ cast' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Cast" & opAttr "SrcT" .~ tensorType (undefined :: srcT) & opAttr "DstT" .~ tensorType (undefined :: dstT) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "SrcT" } output_arg { name: "y" type_attr: "DstT" } attr { name: "SrcT" type: "type" } attr { name: "DstT" type: "type" } -} -- | Returns the element-wise smallest integer not less than x. ceil :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ ceil = ceil' id ceil' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ ceil' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Ceil" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Checks a tensor for NaN and Inf values. -- -- When run, reports an `InvalidArgument` error if `tensor` has any values -- that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is. checkNumerics :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __tensor__ -> Tensor Build t -- ^ __output__ checkNumerics = checkNumerics' id checkNumerics' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __tensor__ -> Tensor Build t -- ^ __output__ checkNumerics' op'options tensor | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor] return (opDef "CheckNumerics" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "message" type: "string" description: "Prefix of the error message." } -} -- | Computes the Cholesky decomposition of one or more square matrices. -- -- The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions -- form square matrices, with the same constraints as the single matrix Cholesky -- decomposition above. The output is a tensor of the same shape as the input -- containing the Cholesky decompositions for all input submatrices `[..., :, :]`. cholesky :: forall v'1 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __input__: Shape is `[..., M, M]`. -> Tensor Build t -- ^ __output__: Shape is `[..., M, M]`. cholesky = cholesky' id cholesky' :: forall v'1 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: Shape is `[..., M, M]`. -> Tensor Build t -- ^ __output__: Shape is `[..., M, M]`. cholesky' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Cholesky" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "Shape is `[..., M, M]`." type_attr: "T" } output_arg { name: "output" description: "Shape is `[..., M, M]`."
type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } } } -} -- | Computes the reverse mode backpropagated gradient of the Cholesky algorithm. -- -- For an explanation see "Differentiation of the Cholesky algorithm" by -- Iain Murray http://arxiv.org/abs/1602.07527. choleskyGrad :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __l__: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`. -- Algorithm depends only on lower triangular part of the innermost matrices of -- this tensor. -> Tensor v'2 t -- ^ __grad__: df/dl where f is some scalar function. Shape is `[..., M, M]`. -- Algorithm depends only on lower triangular part of the innermost matrices of -- this tensor. -> Tensor Build t -- ^ __output__: Symmetrized version of df/dA . Shape is `[..., M, M]` choleskyGrad = choleskyGrad' id choleskyGrad' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __l__: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`. -- Algorithm depends only on lower triangular part of the innermost matrices of -- this tensor. -> Tensor v'2 t -- ^ __grad__: df/dl where f is some scalar function. Shape is `[..., M, M]`. -- Algorithm depends only on lower triangular part of the innermost matrices of -- this tensor. -> Tensor Build t -- ^ __output__: Symmetrized version of df/dA . Shape is `[..., M, M]` choleskyGrad' op'options l grad | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs l, buildInputs grad] return (opDef "CholeskyGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "l" description: "Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`.\nAlgorithm depends only on lower triangular part of the innermost matrices of\nthis tensor." type_attr: "T" } input_arg { name: "grad" description: "df/dl where f is some scalar function. Shape is `[..., M, M]`.\nAlgorithm depends only on lower triangular part of the innermost matrices of\nthis tensor." type_attr: "T" } output_arg { name: "output" description: "Symmetrized version of df/dA . Shape is `[..., M, M]`" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Converts two real numbers to a complex number. -- -- Given a tensor `real` representing the real part of a complex number, and a -- tensor `imag` representing the imaginary part of a complex number, this -- operation returns complex numbers elementwise of the form \\(a + bj\\), where -- *a* represents the `real` part and *b* represents the `imag` part. -- -- The input tensors `real` and `imag` must have the same shape. -- -- For example: -- -- ``` -- # tensor 'real' is [2.25, 3.25] -- # tensor `imag` is [4.75, 5.75] -- tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] -- ``` complex :: forall v'1 v'2 t tout . (OneOf '[Double, Float] t, OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] tout) => Tensor v'1 t -- ^ __real__ -> Tensor v'2 t -- ^ __imag__ -> Tensor Build tout -- ^ __out__ complex = complex' id complex' :: forall v'1 v'2 t tout . 
(OneOf '[Double, Float] t, OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] tout) => OpParams -> Tensor v'1 t -- ^ __real__ -> Tensor v'2 t -- ^ __imag__ -> Tensor Build tout -- ^ __out__ complex' op'options real imag | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs real, buildInputs imag] return (opDef "Complex" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tout" .~ tensorType (undefined :: tout) & op'options & opInputs .~ op'inputs) {- input_arg { name: "real" type_attr: "T" } input_arg { name: "imag" type_attr: "T" } output_arg { name: "out" type_attr: "Tout" } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "Tout" type: "type" default_value { type: DT_COMPLEX64 } allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Computes the complex absolute value of a tensor. -- -- Given a tensor `x` of complex numbers, this operation returns a tensor of type -- `float` or `double` that is the absolute value of each element in `x`. All -- elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute -- value is computed as \\( \sqrt{a^2 + b^2}\\). complexAbs :: forall v'1 t tout . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] t, OneOf '[Double, Float] tout) => Tensor v'1 t -- ^ __x__ -> Tensor Build tout -- ^ __y__ complexAbs = complexAbs' id complexAbs' :: forall v'1 t tout . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] t, OneOf '[Double, Float] tout) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build tout -- ^ __y__ complexAbs' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "ComplexAbs" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tout" .~ tensorType (undefined :: tout) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "Tout" } attr { name: "T" type: "type" default_value { type: DT_COMPLEX64 } allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } } } attr { name: "Tout" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Computes the ids of the positions in sampled_candidates that match true_labels. -- -- When doing log-odds NCE, the result of this op should be passed through a -- SparseToDense op, then added to the logits of the sampled candidates. This has -- the effect of 'removing' the sampled labels that match the true labels by -- making the classifier sure that they are sampled labels. computeAccidentalHits :: Data.Int.Int64 -- ^ __num_true__: Number of true labels per context. -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: The true_classes output of UnpackSparseLabels. -> Tensor v'2 Data.Int.Int64 -- ^ __sampled_candidates__: The sampled_candidates output of CandidateSampler. -> (Tensor Build Data.Int.Int32, Tensor Build Data.Int.Int64, Tensor Build Float) -- ^ (__indices__, __ids__, __weights__) -- -- * __indices__: A vector of indices corresponding to rows of true_candidates. -- -- * __ids__: A vector of IDs of positions in sampled_candidates that match a true_label -- for the row with the corresponding index in indices. -- -- * __weights__: A vector of the same length as indices and ids, in which each element -- is -FLOAT_MAX. 
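--
-- A worked sketch with hypothetical values (assumes @constant@ from
-- TensorFlow.Ops): with @num_true = 1@, true classes @[[3]]@, and sampled
-- candidates @[1, 3, 5]@, the sampled candidate at position 1 collides with
-- the true label of row 0:
--
-- > let trueClasses = constant (Shape [1, 1]) [3 :: Data.Int.Int64]
-- >     sampled     = constant (Shape [3]) [1, 3, 5 :: Data.Int.Int64]
-- >     (indices, ids, weights) = computeAccidentalHits 1 trueClasses sampled
-- > -- expected: indices = [0], ids = [1], weights = [-FLOAT_MAX]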
computeAccidentalHits = computeAccidentalHits' id computeAccidentalHits' :: OpParams -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context. -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: The true_classes output of UnpackSparseLabels. -> Tensor v'2 Data.Int.Int64 -- ^ __sampled_candidates__: The sampled_candidates output of CandidateSampler. -> (Tensor Build Data.Int.Int32, Tensor Build Data.Int.Int64, Tensor Build Float) -- ^ (__indices__, __ids__, __weights__) -- -- * __indices__: A vector of indices corresponding to rows of true_candidates. -- -- * __ids__: A vector of IDs of positions in sampled_candidates that match a true_label -- for the row with the corresponding index in indices. -- -- * __weights__: A vector of the same length as indices and ids, in which each element -- is -FLOAT_MAX. computeAccidentalHits' op'options num_true true_classes sampled_candidates | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs true_classes, buildInputs sampled_candidates] return (opDef "ComputeAccidentalHits" & opAttr "num_true" .~ num_true & op'options & opInputs .~ op'inputs) {- input_arg { name: "true_classes" description: "The true_classes output of UnpackSparseLabels." type: DT_INT64 } input_arg { name: "sampled_candidates" description: "The sampled_candidates output of CandidateSampler." type: DT_INT64 } output_arg { name: "indices" description: "A vector of indices corresponding to rows of true_candidates." type: DT_INT32 } output_arg { name: "ids" description: "A vector of IDs of positions in sampled_candidates that match a true_label\nfor the row with the corresponding index in indices." type: DT_INT64 } output_arg { name: "weights" description: "A vector of the same length as indices and ids, in which each element\nis -FLOAT_MAX." type: DT_FLOAT } attr { name: "num_true" type: "int" description: "Number of true labels per context." } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "An second seed to avoid seed collision." } -} -- | Concatenates tensors along one dimension. concat :: forall v'1 v'2 t . (TensorType t) => Tensor v'1 Data.Int.Int32 -- ^ __concat_dim__: 0-D. The dimension along which to concatenate. Must be in the -- range [0, rank(values)). -> [Tensor v'2 t] -- ^ __values__: The `N` Tensors to concatenate. Their ranks and types must match, -- and their sizes must match in all dimensions except `concat_dim`. -> Tensor Build t -- ^ __output__: A `Tensor` with the concatenation of values stacked along the -- `concat_dim` dimension. This tensor's shape matches that of `values` except -- in `concat_dim` where it has the sum of the sizes. concat = concat' id concat' :: forall v'1 v'2 t . (TensorType t) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __concat_dim__: 0-D. The dimension along which to concatenate. Must be in the -- range [0, rank(values)). -> [Tensor v'2 t] -- ^ __values__: The `N` Tensors to concatenate. Their ranks and types must match, -- and their sizes must match in all dimensions except `concat_dim`. -> Tensor Build t -- ^ __output__: A `Tensor` with the concatenation of values stacked along the -- `concat_dim` dimension. This tensor's shape matches that of `values` except -- in `concat_dim` where it has the sum of the sizes. 
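--
-- A short usage sketch (assumes @constant@ from TensorFlow.Ops and
-- @runSession@/@run@ from TensorFlow.Session; note that this module's
-- @concat@ shadows the Prelude function, so import it qualified in client
-- code). Concatenating two @[1, 2]@ tensors along dimension 0 yields shape
-- @[2, 2]@:
--
-- > let dim = constant (Shape []) [0 :: Data.Int.Int32]
-- >     x   = constant (Shape [1, 2]) [1, 2 :: Float]
-- >     y   = constant (Shape [1, 2]) [3, 4 :: Float]
-- > -- run (concat dim [x, y]) is expected to yield [1, 2, 3, 4]
-- > -- with shape [2, 2]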
concat' op'options concat_dim values | eqLengthGuard [("N", [("values", length values)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs concat_dim, buildInputs values] return (opDef "Concat" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length values) :: Int64 {- input_arg { name: "concat_dim" description: "0-D. The dimension along which to concatenate. Must be in the\nrange [0, rank(values))." type: DT_INT32 } input_arg { name: "values" description: "The `N` Tensors to concatenate. Their ranks and types must match,\nand their sizes must match in all dimensions except `concat_dim`." type_attr: "T" number_attr: "N" } output_arg { name: "output" description: "A `Tensor` with the concatenation of values stacked along the\n`concat_dim` dimension. This tensor\'s shape matches that of `values` except\nin `concat_dim` where it has the sum of the sizes." type_attr: "T" } attr { name: "N" type: "int" has_minimum: true minimum: 2 } attr { name: "T" type: "type" } -} -- | Computes offsets of concat inputs within its output. -- -- For example (the inputs differ in dimension 1, so that is the -- concatenation dimension): -- -- ```prettyprint -- # 'x' is [2, 2, 7] -- # 'y' is [2, 3, 7] -- # 'z' is [2, 5, 7] -- concat_offset(1, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0] -- ``` concatOffset :: Tensor v'1 Data.Int.Int32 -- ^ __concat_dim__: The dimension along which to concatenate. -> [Tensor v'2 Data.Int.Int32] -- ^ __shape__: The `N` int32 vectors representing shape of tensors being concatenated. -> [Tensor Build Data.Int.Int32] -- ^ __offset__: The `N` int32 vectors representing the starting offset -- of input tensors within the concatenated output. -- -- This is typically used by gradient computations for a concat operation. concatOffset = concatOffset' id concatOffset' :: OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __concat_dim__: The dimension along which to concatenate. -> [Tensor v'2 Data.Int.Int32] -- ^ __shape__: The `N` int32 vectors representing shape of tensors being concatenated. -> [Tensor Build Data.Int.Int32] -- ^ __offset__: The `N` int32 vectors representing the starting offset -- of input tensors within the concatenated output. -- -- This is typically used by gradient computations for a concat operation. concatOffset' op'options concat_dim shape | eqLengthGuard [("N", [("shape", length shape)])] = pureOp [n] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs concat_dim, buildInputs shape] return (opDef "ConcatOffset" & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length shape) :: Int64 {- input_arg { name: "concat_dim" description: "The dimension along which to concatenate." type: DT_INT32 } input_arg { name: "shape" description: "The `N` int32 vectors representing shape of tensors being concatenated." type: DT_INT32 number_attr: "N" } output_arg { name: "offset" description: "The `N` int32 vectors representing the starting offset\n of input tensors within the concatenated output.\n\nThis is typically used by gradient computations for a concat operation." type: DT_INT32 number_attr: "N" } attr { name: "N" type: "int" has_minimum: true minimum: 2 } -} -- | Concatenates tensors along one dimension. concatV2 :: forall v'1 v'2 t tidx . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => [Tensor v'1 t] -- ^ __values__: List of `N` Tensors to concatenate. Their ranks and types must match, -- and their sizes must match in all dimensions except `concat_dim`.
-> Tensor v'2 tidx -- ^ __axis__: 0-D. The dimension along which to concatenate. Must be in the -- range [-rank(values), rank(values)). -> Tensor Build t -- ^ __output__: A `Tensor` with the concatenation of values stacked along the -- `concat_dim` dimension. This tensor's shape matches that of `values` except -- in `concat_dim` where it has the sum of the sizes. concatV2 = concatV2' id concatV2' :: forall v'1 v'2 t tidx . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> [Tensor v'1 t] -- ^ __values__: List of `N` Tensors to concatenate. Their ranks and types must match, -- and their sizes must match in all dimensions except `concat_dim`. -> Tensor v'2 tidx -- ^ __axis__: 0-D. The dimension along which to concatenate. Must be in the -- range [-rank(values), rank(values)). -> Tensor Build t -- ^ __output__: A `Tensor` with the concatenation of values stacked along the -- `concat_dim` dimension. This tensor's shape matches that of `values` except -- in `concat_dim` where it has the sum of the sizes. concatV2' op'options values axis | eqLengthGuard [("N", [("values", length values)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs values, buildInputs axis] return (opDef "ConcatV2" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length values) :: Int64 {- input_arg { name: "values" description: "List of `N` Tensors to concatenate. Their ranks and types must match,\nand their sizes must match in all dimensions except `concat_dim`." type_attr: "T" number_attr: "N" } input_arg { name: "axis" description: "0-D. The dimension along which to concatenate. Must be in the\nrange [-rank(values), rank(values))." type_attr: "Tidx" } output_arg { name: "output" description: "A `Tensor` with the concatenation of values stacked along the\n`concat_dim` dimension. This tensor\'s shape matches that of `values` except\nin `concat_dim` where it has the sum of the sizes." type_attr: "T" } attr { name: "N" type: "int" has_minimum: true minimum: 2 } attr { name: "T" type: "type" } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | A conditional accumulator for aggregating gradients. The accumulator accepts -- -- gradients marked with local_step greater or equal to the most recent global_step -- known to the accumulator. The average can be extracted from the accumulator, -- provided sufficient gradients have been accumulated. Extracting the average -- automatically resets the aggregate to 0, and increments the global_step recorded -- by the accumulator. conditionalAccumulator :: forall m' . (MonadBuild m') => DataType -- ^ __dtype__: The type of the value being accumulated. -> Shape -- ^ __shape__: The shape of the values, can be [], in which case shape is unknown. -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the accumulator. conditionalAccumulator = conditionalAccumulator' id conditionalAccumulator' :: forall m' . (MonadBuild m') => OpParams -> DataType -- ^ __dtype__: The type of the value being accumulated. -> Shape -- ^ __shape__: The shape of the values, can be [], in which case shape is unknown. -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the accumulator. 
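--
-- A sketch of creating a handle inside a session (illustrative; assumes
-- @runSession@ from TensorFlow.Session and that the @DataType@ constructors,
-- e.g. @DT_FLOAT@, are in scope via TensorFlow.Types):
--
-- > example :: IO ()
-- > example = runSession $ do
-- >   handle <- conditionalAccumulator DT_FLOAT (Shape [1])
-- >   -- the handle can now be passed to the gradient-accumulation ops;
-- >   -- the running average is extracted once enough gradients have
-- >   -- been applied
-- >   return ()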
conditionalAccumulator' op'options dtype shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "ConditionalAccumulator" & opAttr "dtype" .~ dtype & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" description: "The handle to the accumulator." type: DT_STRING is_ref: true } attr { name: "dtype" type: "type" description: "The type of the value being accumulated." allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "shape" type: "shape" description: "The shape of the values, can be [], in which case shape is unknown." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this accumulator is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this accumulator will be shared under the given name\nacross multiple sessions." } -} -- | Returns the complex conjugate of a complex number. -- -- Given a tensor `input` of complex numbers, this operation returns a tensor of -- complex numbers that are the complex conjugate of each element in `input`. The -- complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the -- real part and *b* is the imaginary part. -- -- The complex conjugate returned by this operation is of the form \\(a - bj\\). -- -- For example: -- -- ``` -- # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] -- tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] -- ``` conj :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ conj = conj' id conj' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ conj' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Conj" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_COMPLEX64 } allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Returns a constant tensor. const :: forall dtype . (TensorType dtype) => Tensor Build dtype -- ^ __output__ const = const' id const' :: forall dtype . (TensorType dtype) => OpParams -> Tensor Build dtype -- ^ __output__ const' op'options | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] return (opDef "Const" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- output_arg { name: "output" type_attr: "dtype" } attr { name: "value" type: "tensor" description: "Attr `value` is the tensor to return." } attr { name: "dtype" type: "type" } -} -- | Does nothing. Serves as a control trigger for scheduling. -- -- Only useful as a placeholder for control edges. controlTrigger :: forall m' . (MonadBuild m') => m' (ControlNode) controlTrigger = controlTrigger' id controlTrigger' :: forall m' . 
(MonadBuild m') => OpParams -> m' (ControlNode) controlTrigger' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "ControlTrigger" & op'options & opInputs .~ op'inputs) {- -} -- | Computes a 2-D convolution given 4-D `input` and `filter` tensors. -- -- Given an input tensor of shape `[batch, in_height, in_width, in_channels]` -- and a filter / kernel tensor of shape -- `[filter_height, filter_width, in_channels, out_channels]`, this op -- performs the following: -- -- 1. Flattens the filter to a 2-D matrix with shape -- `[filter_height * filter_width * in_channels, output_channels]`. -- 2. Extracts image patches from the input tensor to form a *virtual* -- tensor of shape `[batch, out_height, out_width, -- filter_height * filter_width * in_channels]`. -- 3. For each patch, right-multiplies the filter matrix and the image patch -- vector. -- -- In detail, with the default NHWC format, -- -- output[b, i, j, k] = -- sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * -- filter[di, dj, q, k] -- -- Must have `strides[0] = strides[3] = 1`. For the most common case of the same -- horizontal and vertical strides, `strides = [1, stride, stride, 1]`. conv2D :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__: A 4-D tensor. The dimension order is interpreted according to the value -- of `data_format`, see below for details. -> Tensor v'2 t -- ^ __filter__: A 4-D tensor of shape -- `[filter_height, filter_width, in_channels, out_channels]` -> Tensor Build t -- ^ __output__: A 4-D tensor. The dimension order is determined by the value of -- `data_format`, see below for details. conv2D = conv2D' id conv2D' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: A 4-D tensor. The dimension order is interpreted according to the value -- of `data_format`, see below for details. -> Tensor v'2 t -- ^ __filter__: A 4-D tensor of shape -- `[filter_height, filter_width, in_channels, out_channels]` -> Tensor Build t -- ^ __output__: A 4-D tensor. The dimension order is determined by the value of -- `data_format`, see below for details. conv2D' op'options input filter | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter] return (opDef "Conv2D" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "A 4-D tensor. The dimension order is interpreted according to the value\nof `data_format`, see below for details." type_attr: "T" } input_arg { name: "filter" description: "A 4-D tensor of shape\n`[filter_height, filter_width, in_channels, out_channels]`" type_attr: "T" } output_arg { name: "output" description: "A 4-D tensor. The dimension order is determined by the value of\n`data_format`, see below for details." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "strides" type: "list(int)" description: "1-D tensor of length 4. The stride of the sliding window for each\ndimension of `input`. The dimension order is determined by the value of\n `data_format`, see below for details." } attr { name: "use_cudnn_on_gpu" type: "bool" default_value { b: true } } attr { name: "padding" type: "string" description: "The type of padding algorithm to use."
allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, height, width, channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, channels, height, width]." allowed_values { list { s: "NHWC" s: "NCHW" } } } -} -- | Computes the gradients of convolution with respect to the filter. conv2DBackpropFilter :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, in_channels]`. -> Tensor v'2 Data.Int.Int32 -- ^ __filter_sizes__: An integer vector representing the tensor shape of `filter`, -- where `filter` is a 4-D -- `[filter_height, filter_width, in_channels, out_channels]` tensor. -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, out_channels]`. -- Gradients w.r.t. the output of the convolution. -> Tensor Build t -- ^ __output__: 4-D with shape -- `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t. -- the `filter` input of the convolution. conv2DBackpropFilter = conv2DBackpropFilter' id conv2DBackpropFilter' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, in_channels]`. -> Tensor v'2 Data.Int.Int32 -- ^ __filter_sizes__: An integer vector representing the tensor shape of `filter`, -- where `filter` is a 4-D -- `[filter_height, filter_width, in_channels, out_channels]` tensor. -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, out_channels]`. -- Gradients w.r.t. the output of the convolution. -> Tensor Build t -- ^ __output__: 4-D with shape -- `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t. -- the `filter` input of the convolution. conv2DBackpropFilter' op'options input filter_sizes out_backprop | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter_sizes, buildInputs out_backprop] return (opDef "Conv2DBackpropFilter" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "4-D with shape `[batch, in_height, in_width, in_channels]`." type_attr: "T" } input_arg { name: "filter_sizes" description: "An integer vector representing the tensor shape of `filter`,\nwhere `filter` is a 4-D\n`[filter_height, filter_width, in_channels, out_channels]` tensor." type: DT_INT32 } input_arg { name: "out_backprop" description: "4-D with shape `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution." type_attr: "T" } output_arg { name: "output" description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t.\nthe `filter` input of the convolution." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "strides" type: "list(int)" description: "The stride of the sliding window for each dimension of the input\nof the convolution. Must be in the same order as the dimension specified with\nformat." 
} attr { name: "use_cudnn_on_gpu" type: "bool" default_value { b: true } } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]." allowed_values { list { s: "NHWC" s: "NCHW" } } } -} -- | Computes the gradients of convolution with respect to the input. conv2DBackpropInput :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 Data.Int.Int32 -- ^ __input_sizes__: An integer vector representing the shape of `input`, -- where `input` is a 4-D `[batch, height, width, channels]` tensor. -> Tensor v'2 t -- ^ __filter__: 4-D with shape -- `[filter_height, filter_width, in_channels, out_channels]`. -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, out_channels]`. -- Gradients w.r.t. the output of the convolution. -> Tensor Build t -- ^ __output__: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient -- w.r.t. the input of the convolution. conv2DBackpropInput = conv2DBackpropInput' id conv2DBackpropInput' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __input_sizes__: An integer vector representing the shape of `input`, -- where `input` is a 4-D `[batch, height, width, channels]` tensor. -> Tensor v'2 t -- ^ __filter__: 4-D with shape -- `[filter_height, filter_width, in_channels, out_channels]`. -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, out_channels]`. -- Gradients w.r.t. the output of the convolution. -> Tensor Build t -- ^ __output__: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient -- w.r.t. the input of the convolution. conv2DBackpropInput' op'options input_sizes filter out_backprop | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_sizes, buildInputs filter, buildInputs out_backprop] return (opDef "Conv2DBackpropInput" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_sizes" description: "An integer vector representing the shape of `input`,\nwhere `input` is a 4-D `[batch, height, width, channels]` tensor." type: DT_INT32 } input_arg { name: "filter" description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`." type_attr: "T" } input_arg { name: "out_backprop" description: "4-D with shape `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution." type_attr: "T" } output_arg { name: "output" description: "4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient\nw.r.t. the input of the convolution." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "strides" type: "list(int)" description: "The stride of the sliding window for each dimension of the input\nof the convolution. Must be in the same order as the dimension specified with\nformat." 
} attr { name: "use_cudnn_on_gpu" type: "bool" default_value { b: true } } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]." allowed_values { list { s: "NHWC" s: "NCHW" } } } -} -- | Computes a 3-D convolution given 5-D `input` and `filter` tensors. -- -- In signal processing, cross-correlation is a measure of similarity of -- two waveforms as a function of a time-lag applied to one of them. This -- is also known as a sliding dot product or sliding inner-product. -- -- Our Conv3D implements a form of cross-correlation. conv3D :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __input__: Shape `[batch, in_depth, in_height, in_width, in_channels]`. -> Tensor v'2 t -- ^ __filter__: Shape `[filter_depth, filter_height, filter_width, in_channels, -- out_channels]`. `in_channels` must match between `input` and `filter`. -> Tensor Build t -- ^ __output__ conv3D = conv3D' id conv3D' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: Shape `[batch, in_depth, in_height, in_width, in_channels]`. -> Tensor v'2 t -- ^ __filter__: Shape `[filter_depth, filter_height, filter_width, in_channels, -- out_channels]`. `in_channels` must match between `input` and `filter`. -> Tensor Build t -- ^ __output__ conv3D' op'options input filter | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter] return (opDef "Conv3D" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "Shape `[batch, in_depth, in_height, in_width, in_channels]`." type_attr: "T" } input_arg { name: "filter" description: "Shape `[filter_depth, filter_height, filter_width, in_channels,\nout_channels]`. `in_channels` must match between `input` and `filter`." type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "strides" type: "list(int)" description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`." has_minimum: true minimum: 5 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } -} -- | Computes the gradients of 3-D convolution with respect to the filter. conv3DBackpropFilter :: forall v'1 v'2 v'3 t . 
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __input__: Shape `[batch, depth, rows, cols, in_channels]`. -> Tensor v'2 t -- ^ __filter__: Shape `[depth, rows, cols, in_channels, out_channels]`. -- `in_channels` must match between `input` and `filter`. -> Tensor v'3 t -- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, -- out_channels]`. -> Tensor Build t -- ^ __output__ conv3DBackpropFilter = conv3DBackpropFilter' id conv3DBackpropFilter' :: forall v'1 v'2 v'3 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: Shape `[batch, depth, rows, cols, in_channels]`. -> Tensor v'2 t -- ^ __filter__: Shape `[depth, rows, cols, in_channels, out_channels]`. -- `in_channels` must match between `input` and `filter`. -> Tensor v'3 t -- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, -- out_channels]`. -> Tensor Build t -- ^ __output__ conv3DBackpropFilter' op'options input filter out_backprop | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs out_backprop] return (opDef "Conv3DBackpropFilter" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "Shape `[batch, depth, rows, cols, in_channels]`." type_attr: "T" } input_arg { name: "filter" description: "Shape `[depth, rows, cols, in_channels, out_channels]`.\n`in_channels` must match between `input` and `filter`." type_attr: "T" } input_arg { name: "out_backprop" description: "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`." type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "strides" type: "list(int)" description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`." has_minimum: true minimum: 5 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } -} -- | Computes the gradients of 3-D convolution with respect to the filter. conv3DBackpropFilterV2 :: forall v'1 v'2 v'3 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __input__: Shape `[batch, depth, rows, cols, in_channels]`. -> Tensor v'2 Data.Int.Int32 -- ^ __filter_sizes__: An integer vector representing the tensor shape of `filter`, -- where `filter` is a 5-D -- `[filter_depth, filter_height, filter_width, in_channels, out_channels]` -- tensor. -> Tensor v'3 t -- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, -- out_channels]`. 
-> Tensor Build t -- ^ __output__ conv3DBackpropFilterV2 = conv3DBackpropFilterV2' id conv3DBackpropFilterV2' :: forall v'1 v'2 v'3 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: Shape `[batch, depth, rows, cols, in_channels]`. -> Tensor v'2 Data.Int.Int32 -- ^ __filter_sizes__: An integer vector representing the tensor shape of `filter`, -- where `filter` is a 5-D -- `[filter_depth, filter_height, filter_width, in_channels, out_channels]` -- tensor. -> Tensor v'3 t -- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, -- out_channels]`. -> Tensor Build t -- ^ __output__ conv3DBackpropFilterV2' op'options input filter_sizes out_backprop | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter_sizes, buildInputs out_backprop] return (opDef "Conv3DBackpropFilterV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "Shape `[batch, depth, rows, cols, in_channels]`." type_attr: "T" } input_arg { name: "filter_sizes" description: "An integer vector representing the tensor shape of `filter`,\nwhere `filter` is a 5-D\n`[filter_depth, filter_height, filter_width, in_channels, out_channels]`\ntensor." type: DT_INT32 } input_arg { name: "out_backprop" description: "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`." type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "strides" type: "list(int)" description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`." has_minimum: true minimum: 5 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } -} -- | Computes the gradients of 3-D convolution with respect to the input. conv3DBackpropInput :: forall v'1 v'2 v'3 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __input__: Shape `[batch, depth, rows, cols, in_channels]`. -> Tensor v'2 t -- ^ __filter__: Shape `[depth, rows, cols, in_channels, out_channels]`. -- `in_channels` must match between `input` and `filter`. -> Tensor v'3 t -- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, -- out_channels]`. -> Tensor Build t -- ^ __output__ conv3DBackpropInput = conv3DBackpropInput' id conv3DBackpropInput' :: forall v'1 v'2 v'3 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: Shape `[batch, depth, rows, cols, in_channels]`. -> Tensor v'2 t -- ^ __filter__: Shape `[depth, rows, cols, in_channels, out_channels]`. -- `in_channels` must match between `input` and `filter`. 
-> Tensor v'3 t -- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, -- out_channels]`. -> Tensor Build t -- ^ __output__ conv3DBackpropInput' op'options input filter out_backprop | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs out_backprop] return (opDef "Conv3DBackpropInput" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "Shape `[batch, depth, rows, cols, in_channels]`." type_attr: "T" } input_arg { name: "filter" description: "Shape `[depth, rows, cols, in_channels, out_channels]`.\n`in_channels` must match between `input` and `filter`." type_attr: "T" } input_arg { name: "out_backprop" description: "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`." type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "strides" type: "list(int)" description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`." has_minimum: true minimum: 5 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } -} -- | Computes the gradients of 3-D convolution with respect to the input. conv3DBackpropInputV2 :: forall v'1 v'2 v'3 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 Data.Int.Int32 -- ^ __input_sizes__: An integer vector representing the tensor shape of `input`, -- where `input` is a 5-D -- `[batch, depth, rows, cols, in_channels]` tensor. -> Tensor v'2 t -- ^ __filter__: Shape `[depth, rows, cols, in_channels, out_channels]`. -- `in_channels` must match between `input` and `filter`. -> Tensor v'3 t -- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, -- out_channels]`. -> Tensor Build t -- ^ __output__ conv3DBackpropInputV2 = conv3DBackpropInputV2' id conv3DBackpropInputV2' :: forall v'1 v'2 v'3 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __input_sizes__: An integer vector representing the tensor shape of `input`, -- where `input` is a 5-D -- `[batch, depth, rows, cols, in_channels]` tensor. -> Tensor v'2 t -- ^ __filter__: Shape `[depth, rows, cols, in_channels, out_channels]`. -- `in_channels` must match between `input` and `filter`. -> Tensor v'3 t -- ^ __out_backprop__: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, -- out_channels]`. 
-> Tensor Build t -- ^ __output__ conv3DBackpropInputV2' op'options input_sizes filter out_backprop | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_sizes, buildInputs filter, buildInputs out_backprop] return (opDef "Conv3DBackpropInputV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_sizes" description: "An integer vector representing the tensor shape of `input`,\nwhere `input` is a 5-D\n`[batch, depth, rows, cols, in_channels]` tensor." type: DT_INT32 } input_arg { name: "filter" description: "Shape `[depth, rows, cols, in_channels, out_channels]`.\n`in_channels` must match between `input` and `filter`." type_attr: "T" } input_arg { name: "out_backprop" description: "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`." type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "strides" type: "list(int)" description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`." has_minimum: true minimum: 5 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } -} -- | Computes cos of x element-wise. cos :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ cos = cos' id cos' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ cos' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Cos" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Increments 'ref' until it reaches 'limit'. countUpTo :: forall t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Data.Int.Int64 -- ^ __limit__: If incrementing ref would bring it above limit, instead generates an -- 'OutOfRange' error. -> Tensor Ref t -- ^ __ref__: Should be from a scalar `Variable` node. -> m' (Tensor Value t) -- ^ __output__: A copy of the input before increment. If nothing else modifies the -- input, the values produced will all be distinct. countUpTo = countUpTo' id countUpTo' :: forall t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Data.Int.Int64 -- ^ __limit__: If incrementing ref would bring it above limit, instead generates an -- 'OutOfRange' error. -> Tensor Ref t -- ^ __ref__: Should be from a scalar `Variable` node. -> m' (Tensor Value t) -- ^ __output__: A copy of the input before increment. If nothing else modifies the -- input, the values produced will all be distinct. 
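{- NOTE (editorial, not part of the generated bindings): a minimal usage sketch
   for countUpTo, assuming 'initializedVariable' and 'scalar' from
   TensorFlow.Ops and 'runSession'/'run' from TensorFlow.Session (companion
   packages, not imported by this module):

     nextTicket = runSession $ do
       counter <- initializedVariable (scalar (0 :: Data.Int.Int64))
       ticket <- countUpTo 100 counter
       run ticket
-}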
countUpTo' op'options limit ref | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref] buildOp [] (opDef "CountUpTo" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "limit" .~ limit & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" description: "Should be from a scalar `Variable` node." type_attr: "T" is_ref: true } output_arg { name: "output" description: "A copy of the input before increment. If nothing else modifies the\ninput, the values produced will all be distinct." type_attr: "T" } attr { name: "limit" type: "int" description: "If incrementing ref would bring it above limit, instead generates an\n\'OutOfRange\' error." } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Extracts crops from the input image tensor and bilinearly resizes them (possibly -- -- with aspect ratio change) to a common output size specified by `crop_size`. This -- is more general than the `crop_to_bounding_box` op which extracts a fixed size -- slice from the input image and does not allow resizing or aspect ratio change. -- -- Returns a tensor with `crops` from the input `image` at positions defined at the -- bounding box locations in `boxes`. The cropped boxes are all resized (with -- bilinear interpolation) to a fixed `size = [crop_height, crop_width]`. The -- result is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`. cropAndResize :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __image__: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. -- Both `image_height` and `image_width` need to be positive. -> Tensor v'2 Float -- ^ __boxes__: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor -- specifies the coordinates of a box in the `box_ind[i]` image and is specified -- in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of -- `y` is mapped to the image coordinate at `y * (image_height - 1)`, so that the -- `[0, 1]` interval of normalized image height is mapped to -- `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in -- which case the sampled crop is an up-down flipped version of the original -- image. The width dimension is treated similarly. Normalized coordinates -- outside the `[0, 1]` range are allowed, in which case we use -- `extrapolation_value` to extrapolate the input image values. -> Tensor v'3 Data.Int.Int32 -- ^ __box_ind__: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. -- The value of `box_ind[i]` specifies the image that the `i`-th box refers to. -> Tensor v'4 Data.Int.Int32 -- ^ __crop_size__: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All -- cropped image patches are resized to this size. The aspect ratio of the image -- content is not preserved. Both `crop_height` and `crop_width` need to be -- positive. -> Tensor Build Float -- ^ __crops__: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. cropAndResize = cropAndResize' id cropAndResize' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __image__: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. -- Both `image_height` and `image_width` need to be positive.
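{- NOTE (editorial, not part of the generated bindings): a minimal usage sketch
   for cropAndResize, assuming 'constant' and 'vector' from TensorFlow.Ops:
   crop a single box covering the whole of image 0 and resize it to 8x8:

     crops = cropAndResize image
                           (constant (Shape [1, 4]) [0, 0, 1, 1 :: Float])
                           (vector [0 :: Data.Int.Int32])
                           (vector [8, 8 :: Data.Int.Int32])
-}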
-> Tensor v'2 Float -- ^ __boxes__: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor -- specifies the coordinates of a box in the `box_ind[i]` image and is specified -- in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of -- `y` is mapped to the image coordinate at `y * (image_height - 1)`, so that the -- `[0, 1]` interval of normalized image height is mapped to -- `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in -- which case the sampled crop is an up-down flipped version of the original -- image. The width dimension is treated similarly. Normalized coordinates -- outside the `[0, 1]` range are allowed, in which case we use -- `extrapolation_value` to extrapolate the input image values. -> Tensor v'3 Data.Int.Int32 -- ^ __box_ind__: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. -- The value of `box_ind[i]` specifies the image that the `i`-th box refers to. -> Tensor v'4 Data.Int.Int32 -- ^ __crop_size__: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All -- cropped image patches are resized to this size. The aspect ratio of the image -- content is not preserved. Both `crop_height` and `crop_width` need to be -- positive. -> Tensor Build Float -- ^ __crops__: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. cropAndResize' op'options image boxes box_ind crop_size | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs image, buildInputs boxes, buildInputs box_ind, buildInputs crop_size] return (opDef "CropAndResize" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "image" description: "A 4-D tensor of shape `[batch, image_height, image_width, depth]`.\nBoth `image_height` and `image_width` need to be positive." type_attr: "T" } input_arg { name: "boxes" description: "A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\nspecifies the coordinates of a box in the `box_ind[i]` image and is specified\nin normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of\n`y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the\n`[0, 1]` interval of normalized image height is mapped to\n`[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in\nwhich case the sampled crop is an up-down flipped version of the original\nimage. The width dimension is treated similarly. Normalized coordinates\noutside the `[0, 1]` range are allowed, in which case we use\n`extrapolation_value` to extrapolate the input image values." type: DT_FLOAT } input_arg { name: "box_ind" description: "A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\nThe value of `box_ind[i]` specifies the image that the `i`-th box refers to." type: DT_INT32 } input_arg { name: "crop_size" description: "A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All\ncropped image patches are resized to this size. The aspect ratio of the image\ncontent is not preserved. Both `crop_height` and `crop_width` need to be\npositive." type: DT_INT32 } output_arg { name: "crops" description: "A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`."
type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "method" type: "string" default_value { s: "bilinear" } description: "A string specifying the interpolation method. Only \'bilinear\' is\nsupported for now." allowed_values { list { s: "bilinear" } } } attr { name: "extrapolation_value" type: "float" default_value { f: 0.0 } description: "Value used for extrapolation, when applicable." } -} -- | Computes the gradient of the crop_and_resize op wrt the input boxes tensor. cropAndResizeGradBoxes :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 Float -- ^ __grads__: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. -> Tensor v'2 t -- ^ __image__: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. -- Both `image_height` and `image_width` need to be positive. -> Tensor v'3 Float -- ^ __boxes__: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor -- specifies the coordinates of a box in the `box_ind[i]` image and is specified -- in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of -- `y` is mapped to the image coordinate at `y * (image_height - 1)`, so that the -- `[0, 1]` interval of normalized image height is mapped to -- `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in -- which case the sampled crop is an up-down flipped version of the original -- image. The width dimension is treated similarly. Normalized coordinates -- outside the `[0, 1]` range are allowed, in which case we use -- `extrapolation_value` to extrapolate the input image values. -> Tensor v'4 Data.Int.Int32 -- ^ __box_ind__: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. -- The value of `box_ind[i]` specifies the image that the `i`-th box refers to. -> Tensor Build Float -- ^ __output__: A 2-D tensor of shape `[num_boxes, 4]`. cropAndResizeGradBoxes = cropAndResizeGradBoxes' id cropAndResizeGradBoxes' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Float -- ^ __grads__: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. -> Tensor v'2 t -- ^ __image__: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. -- Both `image_height` and `image_width` need to be positive. -> Tensor v'3 Float -- ^ __boxes__: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor -- specifies the coordinates of a box in the `box_ind[i]` image and is specified -- in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of -- `y` is mapped to the image coordinate at `y * (image_height - 1)`, so that the -- `[0, 1]` interval of normalized image height is mapped to -- `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in -- which case the sampled crop is an up-down flipped version of the original -- image. The width dimension is treated similarly. Normalized coordinates -- outside the `[0, 1]` range are allowed, in which case we use -- `extrapolation_value` to extrapolate the input image values. -> Tensor v'4 Data.Int.Int32 -- ^ __box_ind__: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
-- The value of `box_ind[i]` specifies the image that the `i`-th box refers to. -> Tensor Build Float -- ^ __output__: A 2-D tensor of shape `[num_boxes, 4]`. cropAndResizeGradBoxes' op'options grads image boxes box_ind | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs grads, buildInputs image, buildInputs boxes, buildInputs box_ind] return (opDef "CropAndResizeGradBoxes" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "grads" description: "A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`." type: DT_FLOAT } input_arg { name: "image" description: "A 4-D tensor of shape `[batch, image_height, image_width, depth]`.\nBoth `image_height` and `image_width` need to be positive." type_attr: "T" } input_arg { name: "boxes" description: "A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\nspecifies the coordinates of a box in the `box_ind[i]` image and is specified\nin normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of\n`y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the\n`[0, 1]` interval of normalized image height is mapped to\n`[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in\nwhich case the sampled crop is an up-down flipped version of the original\nimage. The width dimension is treated similarly. Normalized coordinates\noutside the `[0, 1]` range are allowed, in which case we use\n`extrapolation_value` to extrapolate the input image values." type: DT_FLOAT } input_arg { name: "box_ind" description: "A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\nThe value of `box_ind[i]` specifies the image that the `i`-th box refers to." type: DT_INT32 } output_arg { name: "output" description: "A 2-D tensor of shape `[num_boxes, 4]`." type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "method" type: "string" default_value { s: "bilinear" } description: "A string specifying the interpolation method. Only \'bilinear\' is\nsupported for now." allowed_values { list { s: "bilinear" } } } -} -- | Computes the gradient of the crop_and_resize op wrt the input image tensor. cropAndResizeGradImage :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 Float -- ^ __grads__: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. -> Tensor v'2 Float -- ^ __boxes__: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor -- specifies the coordinates of a box in the `box_ind[i]` image and is specified -- in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of -- `y` is mapped to the image coordinate at `y * (image_height - 1)`, so that the -- `[0, 1]` interval of normalized image height is mapped to -- `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in -- which case the sampled crop is an up-down flipped version of the original -- image. The width dimension is treated similarly. Normalized coordinates -- outside the `[0, 1]` range are allowed, in which case we use -- `extrapolation_value` to extrapolate the input image values. -> Tensor v'3 Data.Int.Int32 -- ^ __box_ind__: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
-> Tensor v'4 Data.Int.Int32 -- ^ __image_size__: A 1-D tensor with value `[batch, image_height, image_width, depth]` -- containing the original image size. Both `image_height` and `image_width` need -- to be positive. -> Tensor Build t -- ^ __output__: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. cropAndResizeGradImage = cropAndResizeGradImage' id cropAndResizeGradImage' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 Float -- ^ __grads__: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. -> Tensor v'2 Float -- ^ __boxes__: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor -- specifies the coordinates of a box in the `box_ind[i]` image and is specified -- in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of -- `y` is mapped to the image coordinate at `y * (image_height - 1)`, so that the -- `[0, 1]` interval of normalized image height is mapped to -- `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in -- which case the sampled crop is an up-down flipped version of the original -- image. The width dimension is treated similarly. Normalized coordinates -- outside the `[0, 1]` range are allowed, in which case we use -- `extrapolation_value` to extrapolate the input image values. -> Tensor v'3 Data.Int.Int32 -- ^ __box_ind__: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. -- The value of `box_ind[i]` specifies the image that the `i`-th box refers to. -> Tensor v'4 Data.Int.Int32 -- ^ __image_size__: A 1-D tensor with value `[batch, image_height, image_width, depth]` -- containing the original image size. Both `image_height` and `image_width` need -- to be positive. -> Tensor Build t -- ^ __output__: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. cropAndResizeGradImage' op'options grads boxes box_ind image_size | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs grads, buildInputs boxes, buildInputs box_ind, buildInputs image_size] return (opDef "CropAndResizeGradImage" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "grads" description: "A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`." type: DT_FLOAT } input_arg { name: "boxes" description: "A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\nspecifies the coordinates of a box in the `box_ind[i]` image and is specified\nin normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of\n`y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the\n`[0, 1]` interval of normalized image height is mapped to\n`[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in\nwhich case the sampled crop is an up-down flipped version of the original\nimage. The width dimension is treated similarly. Normalized coordinates\noutside the `[0, 1]` range are allowed, in which case we use\n`extrapolation_value` to extrapolate the input image values." type: DT_FLOAT } input_arg { name: "box_ind" description: "A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\nThe value of `box_ind[i]` specifies the image that the `i`-th box refers to." type: DT_INT32 } input_arg { name: "image_size" description: "A 1-D tensor with value `[batch, image_height, image_width, depth]`\ncontaining the original image size. 
Both `image_height` and `image_width` need\nto be positive." type: DT_INT32 } output_arg { name: "output" description: "A 4-D tensor of shape `[batch, image_height, image_width, depth]`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE } } } attr { name: "method" type: "string" default_value { s: "bilinear" } description: "A string specifying the interpolation method. Only \'bilinear\' is\nsupported for now." allowed_values { list { s: "bilinear" } } } -} -- | Compute the pairwise cross product. -- -- `a` and `b` must be the same shape; they can either be simple 3-element vectors, -- or any shape where the innermost dimension is 3. In the latter case, each pair -- of corresponding 3-element vectors is cross-multiplied independently. cross :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __a__: A tensor containing 3-element vectors. -> Tensor v'2 t -- ^ __b__: Another tensor, of same type and shape as `a`. -> Tensor Build t -- ^ __product__: Pairwise cross product of the vectors in `a` and `b`. cross = cross' id cross' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __a__: A tensor containing 3-element vectors. -> Tensor v'2 t -- ^ __b__: Another tensor, of same type and shape as `a`. -> Tensor Build t -- ^ __product__: Pairwise cross product of the vectors in `a` and `b`. cross' op'options a b | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a, buildInputs b] return (opDef "Cross" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a" description: "A tensor containing 3-element vectors." type_attr: "T" } input_arg { name: "b" description: "Another tensor, of same type and shape as `a`." type_attr: "T" } output_arg { name: "product" description: "Pairwise cross product of the vectors in `a` and `b`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } -} -- | Compute the cumulative product of the tensor `x` along `axis`. -- -- By default, this op performs an inclusive cumprod, which means that the first -- element of the input is identical to the first element of the output: -- ```prettyprint -- tf.cumprod([a, b, c]) ==> [a, a * b, a * b * c] -- ``` -- -- By setting the `exclusive` kwarg to `True`, an exclusive cumprod is -- performed instead: -- ```prettyprint -- tf.cumprod([a, b, c], exclusive=True) ==> [1, a, a * b] -- ``` -- -- By setting the `reverse` kwarg to `True`, the cumprod is performed in the -- opposite direction: -- ```prettyprint -- tf.cumprod([a, b, c], reverse=True) ==> [a * b * c, b * c, c] -- ``` -- This is more efficient than using separate `tf.reverse` ops. -- -- The `reverse` and `exclusive` kwargs can also be combined: -- ```prettyprint -- tf.cumprod([a, b, c], exclusive=True, reverse=True) ==> [b * c, c, 1] -- ``` cumprod :: forall v'1 v'2 t tidx . 
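{- NOTE (editorial, not part of the generated bindings): a minimal usage sketch
   for cumprod, assuming 'vector' and 'scalar' from TensorFlow.Ops and
   'runSession'/'run' from TensorFlow.Session:

     runSession $ run $ cumprod (vector [1, 2, 3 :: Float])
                                (scalar (0 :: Data.Int.Int32))
     -- ==> [1.0, 2.0, 6.0]

   The "exclusive" and "reverse" attrs are set through the primed variant,
   e.g. cumprod' (opAttr "exclusive" .~ True).
-}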
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 tidx -- ^ __axis__ -> Tensor Build t -- ^ __out__ cumprod = cumprod' id cumprod' :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 tidx -- ^ __axis__ -> Tensor Build t -- ^ __out__ cumprod' op'options x axis | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs axis] return (opDef "Cumprod" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "axis" type_attr: "Tidx" } output_arg { name: "out" type_attr: "T" } attr { name: "exclusive" type: "bool" default_value { b: false } } attr { name: "reverse" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Compute the cumulative sum of the tensor `x` along `axis`. -- -- By default, this op performs an inclusive cumsum, which means that the first -- element of the input is identical to the first element of the output: -- ```prettyprint -- tf.cumsum([a, b, c]) ==> [a, a + b, a + b + c] -- ``` -- -- By setting the `exclusive` kwarg to `True`, an exclusive cumsum is -- performed instead: -- ```prettyprint -- tf.cumsum([a, b, c], exclusive=True) ==> [0, a, a + b] -- ``` -- -- By setting the `reverse` kwarg to `True`, the cumsum is performed in the -- opposite direction: -- ```prettyprint -- tf.cumsum([a, b, c], reverse=True) ==> [a + b + c, b + c, c] -- ``` -- This is more efficient than using separate `tf.reverse` ops. -- -- The `reverse` and `exclusive` kwargs can also be combined: -- ```prettyprint -- tf.cumsum([a, b, c], exclusive=True, reverse=True) ==> [b + c, c, 0] -- ``` cumsum :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 tidx -- ^ __axis__ -> Tensor Build t -- ^ __out__ cumsum = cumsum' id cumsum' :: forall v'1 v'2 t tidx . 
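{- NOTE (editorial, not part of the generated bindings): the corresponding
   sketch for cumsum, with the same assumed helpers as for cumprod above:

     runSession $ run $ cumsum (vector [1, 2, 3 :: Float])
                               (scalar (0 :: Data.Int.Int32))
     -- ==> [1.0, 3.0, 6.0]

     runSession $ run $ cumsum' (opAttr "reverse" .~ True)
                                (vector [1, 2, 3 :: Float])
                                (scalar (0 :: Data.Int.Int32))
     -- ==> [6.0, 5.0, 3.0]
-}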
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 tidx -- ^ __axis__ -> Tensor Build t -- ^ __out__ cumsum' op'options x axis | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs axis] return (opDef "Cumsum" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "axis" type_attr: "Tidx" } output_arg { name: "out" type_attr: "T" } attr { name: "exclusive" type: "bool" default_value { b: false } } attr { name: "reverse" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Decode web-safe base64-encoded strings. -- -- Input may or may not have padding at the end. See EncodeBase64 for padding. -- Web-safe means that input must use - and _ instead of + and /. decodeBase64 :: Tensor v'1 Data.ByteString.ByteString -- ^ __input__: Base64 strings to decode. -> Tensor Build Data.ByteString.ByteString -- ^ __output__: Decoded strings. decodeBase64 = decodeBase64' id decodeBase64' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__: Base64 strings to decode. -> Tensor Build Data.ByteString.ByteString -- ^ __output__: Decoded strings. decodeBase64' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "DecodeBase64" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "Base64 strings to decode." type: DT_STRING } output_arg { name: "output" description: "Decoded strings." type: DT_STRING } -} -- | Convert CSV records to tensors. Each column maps to one tensor. -- -- RFC 4180 format is expected for the CSV records. -- (https://tools.ietf.org/html/rfc4180) -- Note that we allow leading and trailing spaces with int or float field. decodeCSV :: forall v'1 v'2 oUT_TYPE . (OneOfs '[Data.ByteString.ByteString, Data.Int.Int32, Data.Int.Int64, Float] oUT_TYPE) => Tensor v'1 Data.ByteString.ByteString -- ^ __records__: Each string is a record/row in the csv and all records should have -- the same format. -> TensorList (v'2) oUT_TYPE -- ^ __record_defaults__: One tensor per column of the input record, with either a -- scalar default value for that column or empty if the column is required. -> TensorList (Build) oUT_TYPE -- ^ __output__: Each tensor will have the same shape as records. decodeCSV = decodeCSV' id decodeCSV' :: forall v'1 v'2 oUT_TYPE . (OneOfs '[Data.ByteString.ByteString, Data.Int.Int32, Data.Int.Int64, Float] oUT_TYPE) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __records__: Each string is a record/row in the csv and all records should have -- the same format. 
-> TensorList (v'2) oUT_TYPE -- ^ __record_defaults__: One tensor per column of the input record, with either a -- scalar default value for that column or empty if the column is required. -> TensorList (Build) oUT_TYPE -- ^ __output__: Each tensor will have the same shape as records. decodeCSV' op'options records record_defaults | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs records, buildInputs record_defaults] return (opDef "DecodeCSV" & opAttr "OUT_TYPE" .~ fromTensorTypes (Proxy :: Proxy oUT_TYPE) & op'options & opInputs .~ op'inputs) {- input_arg { name: "records" description: "Each string is a record/row in the csv and all records should have\nthe same format." type: DT_STRING } input_arg { name: "record_defaults" description: "One tensor per column of the input record, with either a\nscalar default value for that column or empty if the column is required." type_list_attr: "OUT_TYPE" } output_arg { name: "output" description: "Each tensor will have the same shape as records." type_list_attr: "OUT_TYPE" } attr { name: "OUT_TYPE" type: "list(type)" has_minimum: true minimum: 1 allowed_values { list { type: DT_FLOAT type: DT_INT32 type: DT_INT64 type: DT_STRING } } } attr { name: "field_delim" type: "string" default_value { s: "," } description: "delimiter to separate fields in a record." } -} -- | Decode the first frame of a GIF-encoded image to a uint8 tensor. -- -- GIFs with frame or transparency compression are not supported; convert an -- animated GIF from compressed to uncompressed with: -- -- convert $src.gif -coalesce $dst.gif decodeGif :: Tensor v'1 Data.ByteString.ByteString -- ^ __contents__: 0-D. The GIF-encoded image. -> Tensor Build Data.Word.Word8 -- ^ __image__: 4-D with shape `[num_frames, height, width, 3]`. RGB order decodeGif = decodeGif' id decodeGif' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __contents__: 0-D. The GIF-encoded image. -> Tensor Build Data.Word.Word8 -- ^ __image__: 4-D with shape `[num_frames, height, width, 3]`. RGB order decodeGif' op'options contents | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs contents] return (opDef "DecodeGif" & op'options & opInputs .~ op'inputs) {- input_arg { name: "contents" description: "0-D. The GIF-encoded image." type: DT_STRING } output_arg { name: "image" description: "4-D with shape `[num_frames, height, width, 3]`. RGB order" type: DT_UINT8 } -} -- | Convert JSON-encoded Example records to binary protocol buffer strings. -- -- This op translates a tensor containing Example records, encoded using -- the [standard JSON -- mapping](https://developers.google.com/protocol-buffers/docs/proto3#json), -- into a tensor containing the same records encoded as binary protocol -- buffers. The resulting tensor can then be fed to any of the other -- Example-parsing ops. decodeJSONExample :: Tensor v'1 Data.ByteString.ByteString -- ^ __json_examples__: Each string is a JSON object serialized according to the JSON -- mapping of the Example proto. -> Tensor Build Data.ByteString.ByteString -- ^ __binary_examples__: Each string is a binary Example protocol buffer corresponding -- to the respective element of `json_examples`. decodeJSONExample = decodeJSONExample' id decodeJSONExample' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __json_examples__: Each string is a JSON object serialized according to the JSON -- mapping of the Example proto.
-> Tensor Build Data.ByteString.ByteString -- ^ __binary_examples__: Each string is a binary Example protocol buffer corresponding -- to the respective element of `json_examples`. decodeJSONExample' op'options json_examples | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs json_examples] return (opDef "DecodeJSONExample" & op'options & opInputs .~ op'inputs) {- input_arg { name: "json_examples" description: "Each string is a JSON object serialized according to the JSON\nmapping of the Example proto." type: DT_STRING } output_arg { name: "binary_examples" description: "Each string is a binary Example protocol buffer corresponding\nto the respective element of `json_examples`." type: DT_STRING } -} -- | Decode a JPEG-encoded image to a uint8 tensor. -- -- The attr `channels` indicates the desired number of color channels for the -- decoded image. -- -- Accepted values are: -- -- * 0: Use the number of channels in the JPEG-encoded image. -- * 1: output a grayscale image. -- * 3: output an RGB image. -- -- If needed, the JPEG-encoded image is transformed to match the requested number -- of color channels. -- -- The attr `ratio` allows downscaling the image by an integer factor during -- decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than -- downscaling the image later. decodeJpeg :: Tensor v'1 Data.ByteString.ByteString -- ^ __contents__: 0-D. The JPEG-encoded image. -> Tensor Build Data.Word.Word8 -- ^ __image__: 3-D with shape `[height, width, channels]`. decodeJpeg = decodeJpeg' id decodeJpeg' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __contents__: 0-D. The JPEG-encoded image. -> Tensor Build Data.Word.Word8 -- ^ __image__: 3-D with shape `[height, width, channels]`. decodeJpeg' op'options contents | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs contents] return (opDef "DecodeJpeg" & op'options & opInputs .~ op'inputs) {- input_arg { name: "contents" description: "0-D. The JPEG-encoded image." type: DT_STRING } output_arg { name: "image" description: "3-D with shape `[height, width, channels]`.." type: DT_UINT8 } attr { name: "channels" type: "int" default_value { i: 0 } description: "Number of color channels for the decoded image." } attr { name: "ratio" type: "int" default_value { i: 1 } description: "Downscaling ratio." } attr { name: "fancy_upscaling" type: "bool" default_value { b: true } description: "If true use a slower but nicer upscaling of the\nchroma planes (yuv420/422 only)." } attr { name: "try_recover_truncated" type: "bool" default_value { b: false } description: "If true try to recover an image from truncated input." } attr { name: "acceptable_fraction" type: "float" default_value { f: 1.0 } description: "The minimum required fraction of lines before a truncated\ninput is accepted." } attr { name: "dct_method" type: "string" default_value { s: "" } description: "string specifying a hint about the algorithm used for\ndecompression. Defaults to \"\" which maps to a system-specific\ndefault. Currently valid values are [\"INTEGER_FAST\",\n\"INTEGER_ACCURATE\"]. The hint may be ignored (e.g., the internal\njpeg library changes to a version that does not have that specific\noption.)" } -} -- | Decode a PNG-encoded image to a uint8 or uint16 tensor. -- -- The attr `channels` indicates the desired number of color channels for the -- decoded image. -- -- Accepted values are: -- -- * 0: Use the number of channels in the PNG-encoded image.
-- * 1: output a grayscale image. -- * 3: output an RGB image. -- * 4: output an RGBA image. -- -- If needed, the PNG-encoded image is transformed to match the requested number -- of color channels. decodePng :: forall v'1 dtype . (OneOf '[Data.Word.Word16, Data.Word.Word8] dtype) => Tensor v'1 Data.ByteString.ByteString -- ^ __contents__: 0-D. The PNG-encoded image. -> Tensor Build dtype -- ^ __image__: 3-D with shape `[height, width, channels]`. decodePng = decodePng' id decodePng' :: forall v'1 dtype . (OneOf '[Data.Word.Word16, Data.Word.Word8] dtype) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __contents__: 0-D. The PNG-encoded image. -> Tensor Build dtype -- ^ __image__: 3-D with shape `[height, width, channels]`. decodePng' op'options contents | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs contents] return (opDef "DecodePng" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "contents" description: "0-D. The PNG-encoded image." type: DT_STRING } output_arg { name: "image" description: "3-D with shape `[height, width, channels]`." type_attr: "dtype" } attr { name: "channels" type: "int" default_value { i: 0 } description: "Number of color channels for the decoded image." } attr { name: "dtype" type: "type" default_value { type: DT_UINT8 } allowed_values { list { type: DT_UINT8 type: DT_UINT16 } } } -} -- | Reinterpret the bytes of a string as a vector of numbers. decodeRaw :: forall v'1 out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] out_type) => Tensor v'1 Data.ByteString.ByteString -- ^ __bytes__: All the elements must have the same length. -> Tensor Build out_type -- ^ __output__: A Tensor with one more dimension than the input `bytes`. The -- added dimension will have size equal to the length of the elements -- of `bytes` divided by the number of bytes to represent `out_type`. decodeRaw = decodeRaw' id decodeRaw' :: forall v'1 out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] out_type) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __bytes__: All the elements must have the same length. -> Tensor Build out_type -- ^ __output__: A Tensor with one more dimension than the input `bytes`. The -- added dimension will have size equal to the length of the elements -- of `bytes` divided by the number of bytes to represent `out_type`. decodeRaw' op'options bytes | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs bytes] return (opDef "DecodeRaw" & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "bytes" description: "All the elements must have the same length." type: DT_STRING } output_arg { name: "output" description: "A Tensor with one more dimension than the input `bytes`. The\nadded dimension will have size equal to the length of the elements\nof `bytes` divided by the number of bytes to represent `out_type`." 
type_attr: "out_type" } attr { name: "out_type" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 } } } attr { name: "little_endian" type: "bool" default_value { b: true } description: "Whether the input `bytes` are in little-endian order.\nIgnored for `out_type` values that are stored in a single byte like\n`uint8`." } -} -- | Delete the tensor specified by its handle in the session. deleteSessionTensor :: forall v'1 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __handle__: The handle for a tensor stored in the session state. -> m' (ControlNode) deleteSessionTensor = deleteSessionTensor' id deleteSessionTensor' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __handle__: The handle for a tensor stored in the session state. -> m' (ControlNode) deleteSessionTensor' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "DeleteSessionTensor" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle for a tensor stored in the session state." type: DT_STRING } -} -- | Applies set operation along last dimension of 2 `Tensor` inputs. -- -- See SetOperationOp::SetOperationFromContext for values of `set_operation`. -- -- Output `result` is a `SparseTensor` represented by `result_indices`, -- `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this -- has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` -- dimension contains the result of `set_operation` applied to the corresponding -- `[0...n-1]` dimension of `set`. denseToDenseSetOperation :: forall v'1 v'2 t . (OneOf '[Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8] t) => Tensor v'1 t -- ^ __set1__: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`. -- Dimension `n` contains values in a set, duplicates are allowed but ignored. -> Tensor v'2 t -- ^ __set2__: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`. -- Dimension `n` contains values in a set, duplicates are allowed but ignored. -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__result_indices__, __result_values__, __result_shape__) -- -- * __result_indices__: 2D indices of a `SparseTensor`. -- -- * __result_values__: 1D values of a `SparseTensor`. -- -- * __result_shape__: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is -- the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]` -- is the max result set size across all `0...n-1` dimensions. denseToDenseSetOperation = denseToDenseSetOperation' id denseToDenseSetOperation' :: forall v'1 v'2 t . (OneOf '[Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8] t) => OpParams -> Tensor v'1 t -- ^ __set1__: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`. -- Dimension `n` contains values in a set, duplicates are allowed but ignored. -> Tensor v'2 t -- ^ __set2__: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`. -- Dimension `n` contains values in a set, duplicates are allowed but ignored. 
-> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__result_indices__, __result_values__, __result_shape__) -- -- * __result_indices__: 2D indices of a `SparseTensor`. -- -- * __result_values__: 1D values of a `SparseTensor`. -- -- * __result_shape__: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is -- the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]` -- is the max result set size across all `0...n-1` dimensions. denseToDenseSetOperation' op'options set1 set2 | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs set1, buildInputs set2] return (opDef "DenseToDenseSetOperation" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "set1" description: "`Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.\nDimension `n` contains values in a set, duplicates are allowed but ignored." type_attr: "T" } input_arg { name: "set2" description: "`Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`.\nDimension `n` contains values in a set, duplicates are allowed but ignored." type_attr: "T" } output_arg { name: "result_indices" description: "2D indices of a `SparseTensor`." type: DT_INT64 } output_arg { name: "result_values" description: "1D values of a `SparseTensor`." type_attr: "T" } output_arg { name: "result_shape" description: "1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is\nthe same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`\nis the max result set size across all `0...n-1` dimensions." type: DT_INT64 } attr { name: "set_operation" type: "string" } attr { name: "validate_indices" type: "bool" default_value { b: true } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_UINT16 type: DT_STRING } } } -} -- | Applies set operation along last dimension of `Tensor` and `SparseTensor`. -- -- See SetOperationOp::SetOperationFromContext for values of `set_operation`. -- -- Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, -- and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same -- as `set1`. Dimension `n` contains values in a set, duplicates are allowed but -- ignored. -- -- If `validate_indices` is `True`, this op validates the order and range of `set2` -- indices. -- -- Output `result` is a `SparseTensor` represented by `result_indices`, -- `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this -- has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` -- dimension contains the result of `set_operation` applied to the corresponding -- `[0...n-1]` dimension of `set`. denseToSparseSetOperation :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8] t) => Tensor v'1 t -- ^ __set1__: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`. -- Dimension `n` contains values in a set, duplicates are allowed but ignored. -> Tensor v'2 Data.Int.Int64 -- ^ __set2_indices__: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major -- order. -> Tensor v'3 t -- ^ __set2_values__: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major -- order. -> Tensor v'4 Data.Int.Int64 -- ^ __set2_shape__: 1D `Tensor`, shape of a `SparseTensor`. 
`set2_shape[0...n-1]` must -- be the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the -- max set size across `n-1` dimensions. -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__result_indices__, __result_values__, __result_shape__) -- -- * __result_indices__: 2D indices of a `SparseTensor`. -- -- * __result_values__: 1D values of a `SparseTensor`. -- -- * __result_shape__: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is -- the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]` -- is the max result set size across all `0...n-1` dimensions. denseToSparseSetOperation = denseToSparseSetOperation' id denseToSparseSetOperation' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8] t) => OpParams -> Tensor v'1 t -- ^ __set1__: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`. -- Dimension `n` contains values in a set, duplicates are allowed but ignored. -> Tensor v'2 Data.Int.Int64 -- ^ __set2_indices__: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major -- order. -> Tensor v'3 t -- ^ __set2_values__: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major -- order. -> Tensor v'4 Data.Int.Int64 -- ^ __set2_shape__: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must -- be the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the -- max set size across `n-1` dimensions. -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__result_indices__, __result_values__, __result_shape__) -- -- * __result_indices__: 2D indices of a `SparseTensor`. -- -- * __result_values__: 1D values of a `SparseTensor`. -- -- * __result_shape__: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is -- the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]` -- is the max result set size across all `0...n-1` dimensions. denseToSparseSetOperation' op'options set1 set2_indices set2_values set2_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs set1, buildInputs set2_indices, buildInputs set2_values, buildInputs set2_shape] return (opDef "DenseToSparseSetOperation" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "set1" description: "`Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.\nDimension `n` contains values in a set, duplicates are allowed but ignored." type_attr: "T" } input_arg { name: "set2_indices" description: "2D `Tensor`, indices of a `SparseTensor`. Must be in row-major\norder." type: DT_INT64 } input_arg { name: "set2_values" description: "1D `Tensor`, values of a `SparseTensor`. Must be in row-major\norder." type_attr: "T" } input_arg { name: "set2_shape" description: "1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must\nbe the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the\nmax set size across `n-1` dimensions." type: DT_INT64 } output_arg { name: "result_indices" description: "2D indices of a `SparseTensor`." type: DT_INT64 } output_arg { name: "result_values" description: "1D values of a `SparseTensor`." type_attr: "T" } output_arg { name: "result_shape" description: "1D `Tensor` shape of a `SparseTensor`. 
`result_shape[0...n-1]` is\nthe same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`\nis the max result set size across all `0...n-1` dimensions." type: DT_INT64 } attr { name: "set_operation" type: "string" } attr { name: "validate_indices" type: "bool" default_value { b: true } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_UINT16 type: DT_STRING } } } -} -- | DepthToSpace for tensors of type T. -- -- Rearranges data from depth into blocks of spatial data. -- This is the reverse transformation of SpaceToDepth. More specifically, -- this op outputs a copy of the input tensor where values from the `depth` -- dimension are moved in spatial blocks to the `height` and `width` dimensions. -- The attr `block_size` indicates the input block size and how the data is moved. -- -- * Chunks of data of size `block_size * block_size` from depth are rearranged -- into non-overlapping blocks of size `block_size x block_size` -- * The width of the output tensor is `input_width * block_size`, whereas the -- height is `input_height * block_size`. -- * The depth of the input tensor must be divisible by -- `block_size * block_size`. -- -- That is, assuming the input is in the shape: -- `[batch, height, width, depth]`, -- the shape of the output will be: -- `[batch, height*block_size, width*block_size, depth/(block_size*block_size)]` -- -- This operation requires that the input tensor be of rank 4, and that -- `block_size` be >= 2 and that `block_size * block_size` be a divisor of the -- input depth. -- -- This operation is useful for resizing the activations between convolutions -- (but keeping all data), e.g. instead of pooling. It is also useful for training -- purely convolutional models. -- -- For example, given this input of shape `[1, 1, 1, 4]` and a block size of 2: -- -- ```prettyprint -- x = [[[[1, 2, 3, 4]]]] -- -- ``` -- -- This operation will output a tensor of shape `[1, 2, 2, 1]`: -- -- ```prettyprint -- [[[[1], [2]], -- [[3], [4]]]] -- ``` -- -- Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`, -- and the corresponding output will have 2x2 elements and will have a depth of -- 1 channel (1 = `4 / (block_size * block_size)`). -- The output element shape is `[2, 2, 1]`. -- -- For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g. -- -- ```prettyprint -- x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] -- ``` -- -- This operation, for a block size of 2, will return the following tensor of shape -- `[1, 2, 2, 3]`: -- -- ```prettyprint -- [[[[1, 2, 3], [4, 5, 6]], -- [[7, 8, 9], [10, 11, 12]]]] -- -- ``` -- -- Similarly, for the following input of shape `[1, 2, 2, 4]` and a block size of 2: -- -- ```prettyprint -- x = [[[[1, 2, 3, 4], -- [5, 6, 7, 8]], -- [[9, 10, 11, 12], -- [13, 14, 15, 16]]]] -- ``` -- -- the operator will return the following tensor of shape `[1, 4, 4, 1]`: -- -- ```prettyprint -- [[ [1], [2], [5], [6]], -- [ [3], [4], [7], [8]], -- [ [9], [10], [13], [14]], -- [ [11], [12], [15], [16]]] -- -- ``` depthToSpace :: forall v'1 t . (TensorType t) => Data.Int.Int64 -- ^ __block_size__: The size of the spatial block, same as in Space2Depth. -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ depthToSpace = depthToSpace' id depthToSpace' :: forall v'1 t . (TensorType t) => OpParams -> Data.Int.Int64 -- ^ __block_size__: The size of the spatial block, same as in Space2Depth.
-> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ depthToSpace' op'options block_size input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "DepthToSpace" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "block_size" .~ block_size & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "block_size" type: "int" description: "The size of the spatial block, same as in Space2Depth." has_minimum: true minimum: 2 } -} -- | Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors. -- -- Given an input tensor of shape `[batch, in_height, in_width, in_channels]` -- and a filter / kernel tensor of shape -- `[filter_height, filter_width, in_channels, channel_multiplier]`, containing -- `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies -- a different filter to each input channel (expanding from 1 channel to -- `channel_multiplier` channels for each), then concatenates the results -- together. Thus, the output has `in_channels * channel_multiplier` channels. -- -- for k in 0..in_channels-1 -- for q in 0..channel_multiplier-1 -- output[b, i, j, k * channel_multiplier + q] = -- sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] * -- filter[di, dj, k, q] -- -- Must have `strides[0] = strides[3] = 1`. For the most common case of the same -- horizontal and vertical strides, `strides = [1, stride, stride, 1]`. depthwiseConv2dNative :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __filter__ -> Tensor Build t -- ^ __output__ depthwiseConv2dNative = depthwiseConv2dNative' id depthwiseConv2dNative' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __filter__ -> Tensor Build t -- ^ __output__ depthwiseConv2dNative' op'options input filter | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter] return (opDef "DepthwiseConv2dNative" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "filter" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "strides" type: "list(int)" description: "1-D of length 4. The stride of the sliding window for each dimension\nof `input`." } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, height, width, channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, channels, height, width]." allowed_values { list { s: "NHWC" s: "NCHW" } } } -} -- | Computes the gradients of depthwise convolution with respect to the filter. depthwiseConv2dNativeBackpropFilter :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __input__: 4-D with shape based on `data_format`. For example, if -- `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height, -- in_width, in_channels]` tensor.
-> Tensor v'2 Data.Int.Int32 -- ^ __filter_sizes__: An integer vector representing the tensor shape of `filter`, -- where `filter` is a 4-D -- `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor. -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape based on `data_format`. -- For example, if `data_format` is 'NHWC' then -- out_backprop shape is `[batch, out_height, out_width, out_channels]`. -- Gradients w.r.t. the output of the convolution. -> Tensor Build t -- ^ __output__: 4-D with shape -- `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t. -- the `filter` input of the convolution. depthwiseConv2dNativeBackpropFilter = depthwiseConv2dNativeBackpropFilter' id depthwiseConv2dNativeBackpropFilter' :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: 4-D with shape based on `data_format`. For example, if -- `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height, -- in_width, in_channels]` tensor. -> Tensor v'2 Data.Int.Int32 -- ^ __filter_sizes__: An integer vector representing the tensor shape of `filter`, -- where `filter` is a 4-D -- `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor. -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape based on `data_format`. -- For example, if `data_format` is 'NHWC' then -- out_backprop shape is `[batch, out_height, out_width, out_channels]`. -- Gradients w.r.t. the output of the convolution. -> Tensor Build t -- ^ __output__: 4-D with shape -- `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t. -- the `filter` input of the convolution. depthwiseConv2dNativeBackpropFilter' op'options input filter_sizes out_backprop | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter_sizes, buildInputs out_backprop] return (opDef "DepthwiseConv2dNativeBackpropFilter" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "4-D with shape based on `data_format`. For example, if\n`data_format` is \'NHWC\' then `input` is a 4-D `[batch, in_height,\nin_width, in_channels]` tensor." type_attr: "T" } input_arg { name: "filter_sizes" description: "An integer vector representing the tensor shape of `filter`,\nwhere `filter` is a 4-D\n`[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor." type: DT_INT32 } input_arg { name: "out_backprop" description: "4-D with shape based on `data_format`.\nFor example, if `data_format` is \'NHWC\' then\nout_backprop shape is `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution." type_attr: "T" } output_arg { name: "output" description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t.\nthe `filter` input of the convolution." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "strides" type: "list(int)" description: "The stride of the sliding window for each dimension of the input\nof the convolution." } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } description: "Specify the data format of the input and output data. 
With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, height, width, channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, channels, height, width]." allowed_values { list { s: "NHWC" s: "NCHW" } } } -} -- | Computes the gradients of depthwise convolution with respect to the input. depthwiseConv2dNativeBackpropInput :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => Tensor v'1 Data.Int.Int32 -- ^ __input_sizes__: An integer vector representing the shape of `input`, based -- on `data_format`. For example, if `data_format` is 'NHWC' then -- `input` is a 4-D `[batch, height, width, channels]` tensor. -> Tensor v'2 t -- ^ __filter__: 4-D with shape -- `[filter_height, filter_width, in_channels, depthwise_multiplier]`. -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape based on `data_format`. -- For example, if `data_format` is 'NHWC' then -- out_backprop shape is `[batch, out_height, out_width, out_channels]`. -- Gradients w.r.t. the output of the convolution. -> Tensor Build t -- ^ __output__: 4-D with shape according to `data_format`. For example, if -- `data_format` is 'NHWC', output shape is `[batch, in_height, -- in_width, in_channels]`. Gradient w.r.t. the input of the -- convolution. depthwiseConv2dNativeBackpropInput = depthwiseConv2dNativeBackpropInput' id depthwiseConv2dNativeBackpropInput' :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __input_sizes__: An integer vector representing the shape of `input`, based -- on `data_format`. For example, if `data_format` is 'NHWC' then -- `input` is a 4-D `[batch, height, width, channels]` tensor. -> Tensor v'2 t -- ^ __filter__: 4-D with shape -- `[filter_height, filter_width, in_channels, depthwise_multiplier]`. -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape based on `data_format`. -- For example, if `data_format` is 'NHWC' then -- out_backprop shape is `[batch, out_height, out_width, out_channels]`. -- Gradients w.r.t. the output of the convolution. -> Tensor Build t -- ^ __output__: 4-D with shape according to `data_format`. For example, if -- `data_format` is 'NHWC', output shape is `[batch, in_height, -- in_width, in_channels]`. Gradient w.r.t. the input of the -- convolution. depthwiseConv2dNativeBackpropInput' op'options input_sizes filter out_backprop | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_sizes, buildInputs filter, buildInputs out_backprop] return (opDef "DepthwiseConv2dNativeBackpropInput" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_sizes" description: "An integer vector representing the shape of `input`, based\non `data_format`. For example, if `data_format` is \'NHWC\' then\n `input` is a 4-D `[batch, height, width, channels]` tensor." type: DT_INT32 } input_arg { name: "filter" description: "4-D with shape\n`[filter_height, filter_width, in_channels, depthwise_multiplier]`." type_attr: "T" } input_arg { name: "out_backprop" description: "4-D with shape based on `data_format`.\nFor example, if `data_format` is \'NHWC\' then\nout_backprop shape is `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution." type_attr: "T" } output_arg { name: "output" description: "4-D with shape according to `data_format`. For example, if\n`data_format` is \'NHWC\', output shape is `[batch, in_height,\nin_width, in_channels]`. Gradient w.r.t. 
the input of the\nconvolution." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "strides" type: "list(int)" description: "The stride of the sliding window for each dimension of the input\nof the convolution." } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, height, width, channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, channels, height, width]." allowed_values { list { s: "NHWC" s: "NCHW" } } } -} -- | Dequantize the 'input' tensor into a float Tensor. -- -- [min_range, max_range] are scalar floats that specify the range for -- the 'input' data. The 'mode' attribute controls exactly which calculations are -- used to convert the quantized values to their float equivalents. -- -- In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: -- -- ``` -- if T == qint8, in[i] += (range(T) + 1)/ 2.0 -- out[i] = min_range + (in[i] * (max_range - min_range) / range(T)) -- ``` -- here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()` -- -- *MIN_COMBINED Mode Example* -- -- If the input comes from a QuantizedRelu6, the output type is -- quint8 (range of 0-255) but the possible range of QuantizedRelu6 is -- 0-6. The min_range and max_range values are therefore 0.0 and 6.0. -- Dequantize on quint8 will take each value, cast to float, and multiply -- by 6 / 255. -- Note that if the quantized type is qint8, the operation will additionally add -- 128 to each value prior to casting. -- -- If the mode is 'MIN_FIRST', then this approach is used: -- -- ``` -- number_of_steps = 1 << (# of bits in T) -- range_adjust = number_of_steps / (number_of_steps - 1) -- range = (range_max - range_min) * range_adjust -- range_scale = range / number_of_steps -- const double offset_input = static_cast<double>(input) - lowest_quantized; -- result = range_min + ((input - numeric_limits<T>::min()) * range_scale) -- ``` dequantize :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 Float -- ^ __min_range__: The minimum scalar value possibly produced for the input. -> Tensor v'3 Float -- ^ __max_range__: The maximum scalar value possibly produced for the input. -> Tensor Build Float -- ^ __output__ dequantize = dequantize' id dequantize' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 Float -- ^ __min_range__: The minimum scalar value possibly produced for the input. -> Tensor v'3 Float -- ^ __max_range__: The maximum scalar value possibly produced for the input. -> Tensor Build Float -- ^ __output__ dequantize' op'options input min_range max_range | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs min_range, buildInputs max_range] return (opDef "Dequantize" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "min_range" description: "The minimum scalar value possibly produced for the input."
type: DT_FLOAT } input_arg { name: "max_range" description: "The maximum scalar value possibly produced for the input." type: DT_FLOAT } output_arg { name: "output" type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "mode" type: "string" default_value { s: "MIN_COMBINED" } allowed_values { list { s: "MIN_COMBINED" s: "MIN_FIRST" } } } -} -- | Deserialize and concatenate `SparseTensors` from a serialized minibatch. -- -- The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where -- `N` is the minibatch size and the rows correspond to packed outputs of -- `SerializeSparse`. The ranks of the original `SparseTensor` objects -- must all match. When the final `SparseTensor` is created, it has rank one -- higher than the ranks of the incoming `SparseTensor` objects -- (they have been concatenated along a new row dimension). -- -- The output `SparseTensor` object's shape values for all dimensions but the -- first are the max across the input `SparseTensor` objects' shape values -- for the corresponding dimensions. Its first shape value is `N`, the minibatch -- size. -- -- The input `SparseTensor` objects' indices are assumed ordered in -- standard lexicographic order. If this is not the case, after this -- step run `SparseReorder` to restore index ordering. -- -- For example, if the serialized input is a `[2 x 3]` matrix representing two -- original `SparseTensor` objects: -- -- index = [ 0] -- [10] -- [20] -- values = [1, 2, 3] -- shape = [50] -- -- and -- -- index = [ 2] -- [10] -- values = [4, 5] -- shape = [30] -- -- then the final deserialized `SparseTensor` will be: -- -- index = [0 0] -- [0 10] -- [0 20] -- [1 2] -- [1 10] -- values = [1, 2, 3, 4, 5] -- shape = [2 50] deserializeManySparse :: forall v'1 dtype . (TensorType dtype) => Tensor v'1 Data.ByteString.ByteString -- ^ __serialized_sparse__: 2-D, The `N` serialized `SparseTensor` objects. -- Must have 3 columns. -> (Tensor Build Data.Int.Int64, Tensor Build dtype, Tensor Build Data.Int.Int64) -- ^ (__sparse_indices__, __sparse_values__, __sparse_shape__) -- -- * __sparse_indices__ -- -- * __sparse_values__ -- -- * __sparse_shape__ deserializeManySparse = deserializeManySparse' id deserializeManySparse' :: forall v'1 dtype . (TensorType dtype) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __serialized_sparse__: 2-D, The `N` serialized `SparseTensor` objects. -- Must have 3 columns. -> (Tensor Build Data.Int.Int64, Tensor Build dtype, Tensor Build Data.Int.Int64) -- ^ (__sparse_indices__, __sparse_values__, __sparse_shape__) -- -- * __sparse_indices__ -- -- * __sparse_values__ -- -- * __sparse_shape__ deserializeManySparse' op'options serialized_sparse | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs serialized_sparse] return (opDef "DeserializeManySparse" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "serialized_sparse" description: "2-D, The `N` serialized `SparseTensor` objects.\nMust have 3 columns." type: DT_STRING } output_arg { name: "sparse_indices" type: DT_INT64 } output_arg { name: "sparse_values" type_attr: "dtype" } output_arg { name: "sparse_shape" type: DT_INT64 } attr { name: "dtype" type: "type" description: "The `dtype` of the serialized `SparseTensor` objects." } -} -- | Deletes the resource specified by the handle. 
-- -- All subsequent operations using the resource will result in a NotFound -- error status. destroyResourceOp :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __resource__: handle to the resource to delete. -> m' (ControlNode) destroyResourceOp = destroyResourceOp' id destroyResourceOp' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__: handle to the resource to delete. -> m' (ControlNode) destroyResourceOp' op'options resource | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource] buildOp [] (opDef "DestroyResourceOp" & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" description: "handle to the resource to delete." type: DT_RESOURCE } attr { name: "ignore_lookup_error" type: "bool" default_value { b: true } description: "whether to ignore the error when the resource\ndoesn\'t exist." } -} -- | Destroys the temporary variable and returns its final value. -- -- Sets output to the value of the Tensor pointed to by 'ref', then destroys -- the temporary variable called 'var_name'. -- All other uses of 'ref' *must* have executed before this op. -- This is typically achieved by chaining the ref through each assign op, or by -- using control dependencies. -- -- Outputs the final value of the tensor pointed to by 'ref'. destroyTemporaryVariable :: forall t m' . (MonadBuild m', TensorType t) => Tensor Ref t -- ^ __ref__: A reference to the temporary variable tensor. -> m' (Tensor Value t) -- ^ __value__ destroyTemporaryVariable = destroyTemporaryVariable' id destroyTemporaryVariable' :: forall t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -- ^ __ref__: A reference to the temporary variable tensor. -> m' (Tensor Value t) -- ^ __value__ destroyTemporaryVariable' op'options ref | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref] buildOp [] (opDef "DestroyTemporaryVariable" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" description: "A reference to the temporary variable tensor." type_attr: "T" is_ref: true } output_arg { name: "value" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "var_name" type: "string" description: "Name of the temporary variable, usually the name of the matching\n\'TemporaryVariable\' op." } -} -- | Returns a diagonal tensor with given diagonal values. -- -- Given a `diagonal`, this operation returns a tensor with the `diagonal` and -- everything else padded with zeros. The diagonal is computed as follows: -- -- Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of -- rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where: -- -- `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else. -- -- For example: -- -- ```prettyprint -- # 'diagonal' is [1, 2, 3, 4] -- tf.diag(diagonal) ==> [[1, 0, 0, 0] -- [0, 2, 0, 0] -- [0, 0, 3, 0] -- [0, 0, 0, 4]] -- ``` diag :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Double, Float] t) => Tensor v'1 t -- ^ __diagonal__: Rank k tensor where k is at most 3. -> Tensor Build t -- ^ __output__ diag = diag' id diag' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __diagonal__: Rank k tensor where k is at most 3.
-> Tensor Build t -- ^ __output__ diag' op'options diagonal | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs diagonal] return (opDef "Diag" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "diagonal" description: "Rank k tensor where k is at most 3." type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Returns the diagonal part of the tensor. -- -- This operation returns a tensor with the `diagonal` part -- of the `input`. The `diagonal` part is computed as follows: -- -- Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a -- tensor of rank `k` with dimensions `[D1,..., Dk]` where: -- -- `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`. -- -- For example: -- -- ```prettyprint -- # 'input' is [[1, 0, 0, 0] -- [0, 2, 0, 0] -- [0, 0, 3, 0] -- [0, 0, 0, 4]] -- -- tf.diag_part(input) ==> [1, 2, 3, 4] -- ``` diagPart :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Double, Float] t) => Tensor v'1 t -- ^ __input__: Rank k tensor where k is 2, 4, or 6. -> Tensor Build t -- ^ __diagonal__: The extracted diagonal. diagPart = diagPart' id diagPart' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: Rank k tensor where k is 2, 4, or 6. -> Tensor Build t -- ^ __diagonal__: The extracted diagonal. diagPart' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "DiagPart" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "Rank k tensor where k is 2, 4, or 6." type_attr: "T" } output_arg { name: "diagonal" description: "The extracted diagonal." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Computes Psi, the derivative of Lgamma (the log of the absolute value of -- -- `Gamma(x)`), element-wise. digamma :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ digamma = digamma' id digamma' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ digamma' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Digamma" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors. -- -- The `input` tensor has shape `[batch, in_height, in_width, depth]` and the -- `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each -- input channel is processed independently of the others with its own structuring -- function. The `output` tensor has shape -- `[batch, out_height, out_width, depth]`. 
The spatial dimensions of the output -- tensor depend on the `padding` algorithm. We currently only support the default -- "NHWC" `data_format`. -- -- In detail, the grayscale morphological 2-D dilation is the max-sum correlation -- (for consistency with `conv2d`, we use unmirrored filters): -- -- output[b, y, x, c] = -- max_{dy, dx} input[b, -- strides[1] * y + rates[1] * dy, -- strides[2] * x + rates[2] * dx, -- c] + -- filter[dy, dx, c] -- -- Max-pooling is a special case when the filter has size equal to the pooling -- kernel size and contains all zeros. -- -- Note on duality: The dilation of `input` by the `filter` is equal to the -- negation of the erosion of `-input` by the reflected `filter`. dilation2D :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, depth]`. -> Tensor v'2 t -- ^ __filter__: 3-D with shape `[filter_height, filter_width, depth]`. -> Tensor Build t -- ^ __output__: 4-D with shape `[batch, out_height, out_width, depth]`. dilation2D = dilation2D' id dilation2D' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, depth]`. -> Tensor v'2 t -- ^ __filter__: 3-D with shape `[filter_height, filter_width, depth]`. -> Tensor Build t -- ^ __output__: 4-D with shape `[batch, out_height, out_width, depth]`. dilation2D' op'options input filter | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter] return (opDef "Dilation2D" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "4-D with shape `[batch, in_height, in_width, depth]`." type_attr: "T" } input_arg { name: "filter" description: "3-D with shape `[filter_height, filter_width, depth]`." type_attr: "T" } output_arg { name: "output" description: "4-D with shape `[batch, out_height, out_width, depth]`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } attr { name: "strides" type: "list(int)" description: "The stride of the sliding window for each dimension of the input\ntensor. Must be: `[1, stride_height, stride_width, 1]`." has_minimum: true minimum: 4 } attr { name: "rates" type: "list(int)" description: "The input stride for atrous morphological dilation. Must be:\n`[1, rate_height, rate_width, 1]`." has_minimum: true minimum: 4 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } -} -- | Computes the gradient of morphological 2-D dilation with respect to the filter. dilation2DBackpropFilter :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, depth]`. -> Tensor v'2 t -- ^ __filter__: 3-D with shape `[filter_height, filter_width, depth]`. -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, depth]`. 
-> Tensor Build t -- ^ __filter_backprop__: 3-D with shape `[filter_height, filter_width, depth]`. dilation2DBackpropFilter = dilation2DBackpropFilter' id dilation2DBackpropFilter' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, depth]`. -> Tensor v'2 t -- ^ __filter__: 3-D with shape `[filter_height, filter_width, depth]`. -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, depth]`. -> Tensor Build t -- ^ __filter_backprop__: 3-D with shape `[filter_height, filter_width, depth]`. dilation2DBackpropFilter' op'options input filter out_backprop | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs out_backprop] return (opDef "Dilation2DBackpropFilter" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "4-D with shape `[batch, in_height, in_width, depth]`." type_attr: "T" } input_arg { name: "filter" description: "3-D with shape `[filter_height, filter_width, depth]`." type_attr: "T" } input_arg { name: "out_backprop" description: "4-D with shape `[batch, out_height, out_width, depth]`." type_attr: "T" } output_arg { name: "filter_backprop" description: "3-D with shape `[filter_height, filter_width, depth]`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } attr { name: "strides" type: "list(int)" description: "1-D of length 4. The stride of the sliding window for each dimension of\nthe input tensor. Must be: `[1, stride_height, stride_width, 1]`." has_minimum: true minimum: 4 } attr { name: "rates" type: "list(int)" description: "1-D of length 4. The input stride for atrous morphological dilation.\nMust be: `[1, rate_height, rate_width, 1]`." has_minimum: true minimum: 4 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } -} -- | Computes the gradient of morphological 2-D dilation with respect to the input. dilation2DBackpropInput :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, depth]`. -> Tensor v'2 t -- ^ __filter__: 3-D with shape `[filter_height, filter_width, depth]`. -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, depth]`. -> Tensor Build t -- ^ __in_backprop__: 4-D with shape `[batch, in_height, in_width, depth]`. dilation2DBackpropInput = dilation2DBackpropInput' id dilation2DBackpropInput' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, depth]`. -> Tensor v'2 t -- ^ __filter__: 3-D with shape `[filter_height, filter_width, depth]`. -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape `[batch, out_height, out_width, depth]`. -> Tensor Build t -- ^ __in_backprop__: 4-D with shape `[batch, in_height, in_width, depth]`. 
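-- Note: the dilation ops' required "strides", "rates", and "padding"
-- attrs are not arguments of the generated functions; they must be set
-- through the 'OpParams' argument of the primed variants. A minimal,
-- hedged sketch (it assumes the 'Attribute' instances for
-- @[Data.Int.Int64]@ and 'Data.ByteString.ByteString', and example
-- tensors @inp@, @filt@, and @grad@ defined elsewhere):
--
-- > let dilationAttrs = (opAttr "strides" .~ ([1, 1, 1, 1] :: [Data.Int.Int64]))
-- >                   . (opAttr "rates"   .~ ([1, 1, 1, 1] :: [Data.Int.Int64]))
-- >                   . (opAttr "padding" .~ ("SAME" :: Data.ByteString.ByteString))
-- >     inBackprop    = dilation2DBackpropInput' dilationAttrs inp filt grad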
dilation2DBackpropInput' op'options input filter out_backprop | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs out_backprop] return (opDef "Dilation2DBackpropInput" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "4-D with shape `[batch, in_height, in_width, depth]`." type_attr: "T" } input_arg { name: "filter" description: "3-D with shape `[filter_height, filter_width, depth]`." type_attr: "T" } input_arg { name: "out_backprop" description: "4-D with shape `[batch, out_height, out_width, depth]`." type_attr: "T" } output_arg { name: "in_backprop" description: "4-D with shape `[batch, in_height, in_width, depth]`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } attr { name: "strides" type: "list(int)" description: "1-D of length 4. The stride of the sliding window for each dimension of\nthe input tensor. Must be: `[1, stride_height, stride_width, 1]`." has_minimum: true minimum: 4 } attr { name: "rates" type: "list(int)" description: "1-D of length 4. The input stride for atrous morphological dilation.\nMust be: `[1, rate_height, rate_width, 1]`." has_minimum: true minimum: 4 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } -} -- | Returns x / y element-wise. -- -- *NOTE*: `Div` supports broadcasting. More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) div :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ div = div' id div' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ div' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "Div" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_UINT16 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Draw bounding boxes on a batch of images. -- -- Outputs a copy of `images` but draws on top of the pixels zero or more bounding -- boxes specified by the locations in `boxes`. The coordinates of each -- bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The -- bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and -- height of the underlying image.
-- -- For example, if an image is 100 x 200 pixels and the bounding box is -- `[0.1, 0.2, 0.5, 0.9]`, the bottom-left and upper-right coordinates of the -- bounding box will be `(10, 40)` to `(50, 180)`. -- -- Parts of the bounding box may fall outside the image. drawBoundingBoxes :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Float] t) => Tensor v'1 t -- ^ __images__: 4-D with shape `[batch, height, width, depth]`. A batch of images. -> Tensor v'2 Float -- ^ __boxes__: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding -- boxes. -> Tensor Build t -- ^ __output__: 4-D with the same shape as `images`. The batch of input images with -- bounding boxes drawn on the images. drawBoundingBoxes = drawBoundingBoxes' id drawBoundingBoxes' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Float] t) => OpParams -> Tensor v'1 t -- ^ __images__: 4-D with shape `[batch, height, width, depth]`. A batch of images. -> Tensor v'2 Float -- ^ __boxes__: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding -- boxes. -> Tensor Build t -- ^ __output__: 4-D with the same shape as `images`. The batch of input images with -- bounding boxes drawn on the images. drawBoundingBoxes' op'options images boxes | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images, buildInputs boxes] return (opDef "DrawBoundingBoxes" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "images" description: "4-D with shape `[batch, height, width, depth]`. A batch of images." type_attr: "T" } input_arg { name: "boxes" description: "3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding\nboxes." type: DT_FLOAT } output_arg { name: "output" description: "4-D with the same shape as `images`. The batch of input images with\nbounding boxes drawn on the images." type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_HALF } } } -} -- | Partitions `data` into `num_partitions` tensors using indices from `partitions`. -- -- For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]` -- becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i` -- are placed in `outputs[i]` in lexicographic order of `js`, and the first -- dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`. -- In detail, -- -- ```python -- outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:] -- -- outputs[i] = pack([data[js, ...] for js if partitions[js] == i]) -- ``` -- -- `data.shape` must start with `partitions.shape`. -- -- For example: -- -- ```python -- # Scalar partitions. -- partitions = 1 -- num_partitions = 2 -- data = [10, 20] -- outputs[0] = [] # Empty with shape [0, 2] -- outputs[1] = [[10, 20]] -- -- # Vector partitions. -- partitions = [0, 0, 1, 1, 0] -- num_partitions = 2 -- data = [10, 20, 30, 40, 50] -- outputs[0] = [10, 20, 50] -- outputs[1] = [30, 40] -- ``` -- --
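-- A minimal Haskell usage sketch mirroring the example above (hedged:
-- @vector@ comes from TensorFlow.Ops in the tensorflow-ops package, not
-- from this module):
--
-- ```
-- let partitions   = vector [0, 0, 1, 1, 0 :: Data.Int.Int32]
--     xs           = vector [10, 20, 30, 40, 50 :: Float]
--     [out0, out1] = dynamicPartition 2 xs partitions
-- -- out0 evaluates to [10, 20, 50]; out1 to [30, 40]
-- ```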
dynamicPartition :: forall v'1 v'2 t . (TensorType t) => Data.Int.Int64 -- ^ __num_partitions__: The number of partitions to output. -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 Data.Int.Int32 -- ^ __partitions__: Any shape. Indices in the range `[0, num_partitions)`. -> [Tensor Build t] -- ^ __outputs__ dynamicPartition = dynamicPartition' id dynamicPartition' :: forall v'1 v'2 t . (TensorType t) => OpParams -> Data.Int.Int64 -- ^ __num_partitions__: The number of partitions to output. -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 Data.Int.Int32 -- ^ __partitions__: Any shape. Indices in the range `[0, num_partitions)`. -> [Tensor Build t] -- ^ __outputs__ dynamicPartition' op'options num_partitions data' partitions | eqLengthGuard [] = pureOp [num_partitions] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs partitions] return (opDef "DynamicPartition" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "num_partitions" .~ num_partitions & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "partitions" description: "Any shape. Indices in the range `[0, num_partitions)`." type: DT_INT32 } output_arg { name: "outputs" type_attr: "T" number_attr: "num_partitions" } attr { name: "num_partitions" type: "int" description: "The number of partitions to output." has_minimum: true minimum: 1 } attr { name: "T" type: "type" } -} -- | Interleave the values from the `data` tensors into a single tensor. -- -- Builds a merged tensor such that -- -- ```python -- merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] -- ``` -- -- For example, if each `indices[m]` is scalar or vector, we have -- -- ```python -- # Scalar indices: -- merged[indices[m], ...] = data[m][...] -- -- # Vector indices: -- merged[indices[m][i], ...] = data[m][i, ...] -- ``` -- -- Each `data[i].shape` must start with the corresponding `indices[i].shape`, -- and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we -- must have `data[i].shape = indices[i].shape + constant`. In terms of this -- `constant`, the output shape is -- -- merged.shape = [max(indices)] + constant -- -- Values are merged in order, so if an index appears in both `indices[m][i]` and -- `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the -- merged result. -- -- For example: -- -- ```python -- indices[0] = 6 -- indices[1] = [4, 1] -- indices[2] = [[5, 2], [0, 3]] -- data[0] = [61, 62] -- data[1] = [[41, 42], [11, 12]] -- data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]] -- merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], -- [51, 52], [61, 62]] -- ``` -- --
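-- A minimal Haskell usage sketch (hedged: @vector@ is again assumed from
-- TensorFlow.Ops; the @indices@ and @data@ lists must have equal length,
-- which this binding enforces via 'eqLengthGuard'):
--
-- ```
-- let merged = dynamicStitch
--                [vector [0, 1 :: Data.Int.Int32], vector [2 :: Data.Int.Int32]]
--                [vector [10, 20 :: Float], vector [30 :: Float]]
-- -- merged evaluates to [10, 20, 30]
-- ```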
dynamicStitch :: forall v'1 v'2 t . (TensorType t) => [Tensor v'1 Data.Int.Int32] -- ^ __indices__ -> [Tensor v'2 t] -- ^ __data__ -> Tensor Build t -- ^ __merged__ dynamicStitch = dynamicStitch' id dynamicStitch' :: forall v'1 v'2 t . (TensorType t) => OpParams -> [Tensor v'1 Data.Int.Int32] -- ^ __indices__ -> [Tensor v'2 t] -- ^ __data__ -> Tensor Build t -- ^ __merged__ dynamicStitch' op'options indices data' | eqLengthGuard [("N", [("indices", length indices), ("data", length data')])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices, buildInputs data'] return (opDef "DynamicStitch" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length indices) :: Int64 {- input_arg { name: "indices" type: DT_INT32 number_attr: "N" } input_arg { name: "data" type_attr: "T" number_attr: "N" } output_arg { name: "merged" type_attr: "T" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } -} -- | Computes the (possibly normalized) Levenshtein Edit Distance. -- -- The inputs are variable-length sequences provided by SparseTensors -- (hypothesis_indices, hypothesis_values, hypothesis_shape) -- and -- (truth_indices, truth_values, truth_shape). -- -- The inputs are: editDistance :: forall v'1 v'2 v'3 v'4 v'5 v'6 t . (TensorType t) => Tensor v'1 Data.Int.Int64 -- ^ __hypothesis_indices__: The indices of the hypothesis list SparseTensor. -- This is an N x R int64 matrix. -> Tensor v'2 t -- ^ __hypothesis_values__: The values of the hypothesis list SparseTensor. -- This is an N-length vector. -> Tensor v'3 Data.Int.Int64 -- ^ __hypothesis_shape__: The shape of the hypothesis list SparseTensor. -- This is an R-length vector. -> Tensor v'4 Data.Int.Int64 -- ^ __truth_indices__: The indices of the truth list SparseTensor. -- This is an M x R int64 matrix. -> Tensor v'5 t -- ^ __truth_values__: The values of the truth list SparseTensor. -- This is an M-length vector. -> Tensor v'6 Data.Int.Int64 -- ^ __truth_shape__: truth indices, vector. -> Tensor Build Float -- ^ __output__: A dense float tensor with rank R - 1. -- -- For the example input: -- -- // hypothesis represents a 2x1 matrix with variable-length values: -- // (0,0) = ["a"] -- // (1,0) = ["b"] -- hypothesis_indices = [[0, 0, 0], -- [1, 0, 0]] -- hypothesis_values = ["a", "b"] -- hypothesis_shape = [2, 1, 1] -- -- // truth represents a 2x2 matrix with variable-length values: -- // (0,0) = [] -- // (0,1) = ["a"] -- // (1,0) = ["b", "c"] -- // (1,1) = ["a"] -- truth_indices = [[0, 1, 0], -- [1, 0, 0], -- [1, 0, 1], -- [1, 1, 0]] -- truth_values = ["a", "b", "c", "a"] -- truth_shape = [2, 2, 2] -- normalize = true -- -- The output will be: -- -- // output is a 2x2 matrix with edit distances normalized by truth lengths. -- output = [[inf, 1.0], // (0,0): no truth, (0,1): no hypothesis -- [0.5, 1.0]] // (1,0): addition, (1,1): no hypothesis editDistance = editDistance' id editDistance' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t . (TensorType t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __hypothesis_indices__: The indices of the hypothesis list SparseTensor. -- This is an N x R int64 matrix. -> Tensor v'2 t -- ^ __hypothesis_values__: The values of the hypothesis list SparseTensor. -- This is an N-length vector. -> Tensor v'3 Data.Int.Int64 -- ^ __hypothesis_shape__: The shape of the hypothesis list SparseTensor. -- This is an R-length vector. 
-> Tensor v'4 Data.Int.Int64 -- ^ __truth_indices__: The indices of the truth list SparseTensor. -- This is an M x R int64 matrix. -> Tensor v'5 t -- ^ __truth_values__: The values of the truth list SparseTensor. -- This is an M-length vector. -> Tensor v'6 Data.Int.Int64 -- ^ __truth_shape__: truth indices, vector. -> Tensor Build Float -- ^ __output__: A dense float tensor with rank R - 1. -- -- For the example input: -- -- // hypothesis represents a 2x1 matrix with variable-length values: -- // (0,0) = ["a"] -- // (1,0) = ["b"] -- hypothesis_indices = [[0, 0, 0], -- [1, 0, 0]] -- hypothesis_values = ["a", "b"] -- hypothesis_shape = [2, 1, 1] -- -- // truth represents a 2x2 matrix with variable-length values: -- // (0,0) = [] -- // (0,1) = ["a"] -- // (1,0) = ["b", "c"] -- // (1,1) = ["a"] -- truth_indices = [[0, 1, 0], -- [1, 0, 0], -- [1, 0, 1], -- [1, 1, 0]] -- truth_values = ["a", "b", "c", "a"] -- truth_shape = [2, 2, 2] -- normalize = true -- -- The output will be: -- -- // output is a 2x2 matrix with edit distances normalized by truth lengths. -- output = [[inf, 1.0], // (0,0): no truth, (0,1): no hypothesis -- [0.5, 1.0]] // (1,0): addition, (1,1): no hypothesis editDistance' op'options hypothesis_indices hypothesis_values hypothesis_shape truth_indices truth_values truth_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs hypothesis_indices, buildInputs hypothesis_values, buildInputs hypothesis_shape, buildInputs truth_indices, buildInputs truth_values, buildInputs truth_shape] return (opDef "EditDistance" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "hypothesis_indices" description: "The indices of the hypothesis list SparseTensor.\nThis is an N x R int64 matrix." type: DT_INT64 } input_arg { name: "hypothesis_values" description: "The values of the hypothesis list SparseTensor.\nThis is an N-length vector." type_attr: "T" } input_arg { name: "hypothesis_shape" description: "The shape of the hypothesis list SparseTensor.\nThis is an R-length vector." type: DT_INT64 } input_arg { name: "truth_indices" description: "The indices of the truth list SparseTensor.\nThis is an M x R int64 matrix." type: DT_INT64 } input_arg { name: "truth_values" description: "The values of the truth list SparseTensor.\nThis is an M-length vector." type_attr: "T" } input_arg { name: "truth_shape" description: "truth indices, vector." 
type: DT_INT64 } output_arg { name: "output" description: "A dense float tensor with rank R - 1.\n\nFor the example input:\n\n // hypothesis represents a 2x1 matrix with variable-length values:\n // (0,0) = [\"a\"]\n // (1,0) = [\"b\"]\n hypothesis_indices = [[0, 0, 0],\n [1, 0, 0]]\n hypothesis_values = [\"a\", \"b\"]\n hypothesis_shape = [2, 1, 1]\n\n // truth represents a 2x2 matrix with variable-length values:\n // (0,0) = []\n // (0,1) = [\"a\"]\n // (1,0) = [\"b\", \"c\"]\n // (1,1) = [\"a\"]\n truth_indices = [[0, 1, 0],\n [1, 0, 0],\n [1, 0, 1],\n [1, 1, 0]]\n truth_values = [\"a\", \"b\", \"c\", \"a\"]\n truth_shape = [2, 2, 2]\n normalize = true\n\nThe output will be:\n\n // output is a 2x2 matrix with edit distances normalized by truth lengths.\n output = [[inf, 1.0], // (0,0): no truth, (0,1): no hypothesis\n [0.5, 1.0]] // (1,0): addition, (1,1): no hypothesis" type: DT_FLOAT } attr { name: "normalize" type: "bool" default_value { b: true } description: "boolean (if true, edit distances are normalized by length of truth).\n\nThe output is:" } attr { name: "T" type: "type" } -} -- | Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise. -- -- See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) -- ](http://arxiv.org/abs/1511.07289) elu :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __features__ -> Tensor Build t -- ^ __activations__ elu = elu' id elu' :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __features__ -> Tensor Build t -- ^ __activations__ elu' op'options features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features] return (opDef "Elu" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "features" type_attr: "T" } output_arg { name: "activations" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } -} -- | Computes gradients for the exponential linear (Elu) operation. eluGrad :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __gradients__: The backpropagated gradients to the corresponding Elu operation. -> Tensor v'2 t -- ^ __outputs__: The outputs of the corresponding Elu operation. -> Tensor Build t -- ^ __backprops__: The gradients: `gradients * (outputs + 1)` if outputs < 0, -- `gradients` otherwise. eluGrad = eluGrad' id eluGrad' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __gradients__: The backpropagated gradients to the corresponding Elu operation. -> Tensor v'2 t -- ^ __outputs__: The outputs of the corresponding Elu operation. -> Tensor Build t -- ^ __backprops__: The gradients: `gradients * (outputs + 1)` if outputs < 0, -- `gradients` otherwise. 
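{- A usage sketch for elu (editorial illustration, not generated code;
   assumes `vector` from TensorFlow.Ops and `run`/`runSession` from
   TensorFlow.Session):

     ys = elu (vector [-1, 0, 2 :: Float])
     -- runSession (run ys) ==> approximately [-0.632, 0.0, 2.0]:
     -- negative inputs map to exp x - 1, non-negative inputs pass through.
-}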
eluGrad' op'options gradients outputs | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients, buildInputs outputs] return (opDef "EluGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "gradients" description: "The backpropagated gradients to the corresponding Elu operation." type_attr: "T" } input_arg { name: "outputs" description: "The outputs of the corresponding Elu operation." type_attr: "T" } output_arg { name: "backprops" description: "The gradients: `gradients * (outputs + 1)` if outputs < 0,\n`gradients` otherwise." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } -} -- | Encode strings into web-safe base64 format. -- -- Refer to the following article for more information on base64 format: -- en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the -- end so that the encoded string has a length that is a multiple of 4. See the -- Padding section of the link above. -- -- Web-safe means that the encoder uses - and _ instead of + and /. encodeBase64 :: Tensor v'1 Data.ByteString.ByteString -- ^ __input__: Strings to be encoded. -> Tensor Build Data.ByteString.ByteString -- ^ __output__: Input strings encoded in base64. encodeBase64 = encodeBase64' id encodeBase64' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__: Strings to be encoded. -> Tensor Build Data.ByteString.ByteString -- ^ __output__: Input strings encoded in base64. encodeBase64' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "EncodeBase64" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "Strings to be encoded." type: DT_STRING } output_arg { name: "output" description: "Input strings encoded in base64." type: DT_STRING } attr { name: "pad" type: "bool" default_value { b: false } description: "Bool whether padding is applied at the ends." } -} -- | JPEG-encode an image. -- -- `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`. -- -- The attr `format` can be used to override the color format of the encoded -- output. Values can be: -- -- * `''`: Use a default format based on the number of channels in the image. -- * `grayscale`: Output a grayscale JPEG image. The `channels` dimension -- of `image` must be 1. -- * `rgb`: Output an RGB JPEG image. The `channels` dimension -- of `image` must be 3. -- -- If `format` is not specified or is the empty string, a default format is picked -- as a function of the number of channels in `image`: -- -- * 1: Output a grayscale image. -- * 3: Output an RGB image. encodeJpeg :: Tensor v'1 Data.Word.Word8 -- ^ __image__: 3-D with shape `[height, width, channels]`. -> Tensor Build Data.ByteString.ByteString -- ^ __contents__: 0-D. JPEG-encoded image. encodeJpeg = encodeJpeg' id encodeJpeg' :: OpParams -> Tensor v'1 Data.Word.Word8 -- ^ __image__: 3-D with shape `[height, width, channels]`. -> Tensor Build Data.ByteString.ByteString -- ^ __contents__: 0-D. JPEG-encoded image. encodeJpeg' op'options image | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs image] return (opDef "EncodeJpeg" & op'options & opInputs .~ op'inputs) {- input_arg { name: "image" description: "3-D with shape `[height, width, channels]`."
type: DT_UINT8 } output_arg { name: "contents" description: "0-D. JPEG-encoded image." type: DT_STRING } attr { name: "format" type: "string" default_value { s: "" } description: "Per pixel image format." allowed_values { list { s: "" s: "grayscale" s: "rgb" } } } attr { name: "quality" type: "int" default_value { i: 95 } description: "Quality of the compression from 0 to 100 (higher is better and slower)." } attr { name: "progressive" type: "bool" default_value { b: false } description: "If True, create a JPEG that loads progressively (coarse to fine)." } attr { name: "optimize_size" type: "bool" default_value { b: false } description: "If True, spend CPU/RAM to reduce size with no quality change." } attr { name: "chroma_downsampling" type: "bool" default_value { b: true } description: "See http://en.wikipedia.org/wiki/Chroma_subsampling." } attr { name: "density_unit" type: "string" default_value { s: "in" } description: "Unit used to specify `x_density` and `y_density`:\npixels per inch (`\'in\'`) or centimeter (`\'cm\'`)." allowed_values { list { s: "in" s: "cm" } } } attr { name: "x_density" type: "int" default_value { i: 300 } description: "Horizontal pixels per density unit." } attr { name: "y_density" type: "int" default_value { i: 300 } description: "Vertical pixels per density unit." } attr { name: "xmp_metadata" type: "string" default_value { s: "" } description: "If not empty, embed this XMP metadata in the image header." } -} -- | PNG-encode an image. -- -- `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]` -- where `channels` is: -- -- * 1: for grayscale. -- * 2: for grayscale + alpha. -- * 3: for RGB. -- * 4: for RGBA. -- -- The ZLIB compression level, `compression`, can be -1 for the PNG-encoder -- default or a value from 0 to 9. 9 is the highest compression level, generating -- the smallest output, but is slower. encodePng :: forall v'1 t . (OneOf '[Data.Word.Word16, Data.Word.Word8] t) => Tensor v'1 t -- ^ __image__: 3-D with shape `[height, width, channels]`. -> Tensor Build Data.ByteString.ByteString -- ^ __contents__: 0-D. PNG-encoded image. encodePng = encodePng' id encodePng' :: forall v'1 t . (OneOf '[Data.Word.Word16, Data.Word.Word8] t) => OpParams -> Tensor v'1 t -- ^ __image__: 3-D with shape `[height, width, channels]`. -> Tensor Build Data.ByteString.ByteString -- ^ __contents__: 0-D. PNG-encoded image. encodePng' op'options image | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs image] return (opDef "EncodePng" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "image" description: "3-D with shape `[height, width, channels]`." type_attr: "T" } output_arg { name: "contents" description: "0-D. PNG-encoded image." type: DT_STRING } attr { name: "compression" type: "int" default_value { i: -1 } description: "Compression level." } attr { name: "T" type: "type" default_value { type: DT_UINT8 } allowed_values { list { type: DT_UINT8 type: DT_UINT16 } } } -} -- | Creates or finds a child frame, and makes `data` available to the child frame. -- -- This op is used together with `Exit` to create loops in the graph. -- The unique `frame_name` is used by the `Executor` to identify frames. If -- `is_constant` is true, `output` is a constant in the child frame; otherwise -- it may be changed in the child frame. At most `parallel_iterations` iterations -- are run in parallel in the child frame. enter :: forall v'1 t . 
(TensorType t) => Tensor v'1 t -- ^ __data__: The tensor to be made available to the child frame. -> Tensor Build t -- ^ __output__: The same tensor as `data`. enter = enter' id enter' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __data__: The tensor to be made available to the child frame. -> Tensor Build t -- ^ __output__: The same tensor as `data`. enter' op'options data' | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data'] return (opDef "Enter" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" description: "The tensor to be made available to the child frame." type_attr: "T" } output_arg { name: "output" description: "The same tensor as `data`." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "frame_name" type: "string" description: "The name of the child frame." } attr { name: "is_constant" type: "bool" default_value { b: false } description: "If true, the output is constant within the child frame." } attr { name: "parallel_iterations" type: "int" default_value { i: 10 } description: "The number of iterations allowed to run in parallel." } -} -- | Returns the truth value of (x == y) element-wise. -- -- *NOTE*: `Equal` supports broadcasting. More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) equal :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ equal = equal' id equal' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ equal' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "Equal" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_QUINT8 type: DT_QINT8 type: DT_QINT32 type: DT_STRING type: DT_BOOL type: DT_COMPLEX128 } } } -} -- | Computes the Gauss error function of `x` element-wise. erf :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ erf = erf' id erf' :: forall v'1 t . 
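{- For reference, erf and erfc are complementary: erf x + erfc x = 1 for
   every x. erfc is provided as a separate op because evaluating 1 - erf x
   directly loses precision once erf x is close to 1. A sketch (editorial
   illustration; assumes `vector` from TensorFlow.Ops):

     ys = erfc (vector [0, 1, 2 :: Float])
     -- ==> approximately [1.0, 0.1573, 0.0047]
-}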
(OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ erf' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Erf" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Computes the complementary error function of `x` element-wise. erfc :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ erfc = erfc' id erfc' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ erfc' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Erfc" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Exits the current frame to its parent frame. -- -- Exit makes its input `data` available to the parent frame. exit :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __data__: The tensor to be made available to the parent frame. -> Tensor Build t -- ^ __output__: The same tensor as `data`. exit = exit' id exit' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __data__: The tensor to be made available to the parent frame. -> Tensor Build t -- ^ __output__: The same tensor as `data`. exit' op'options data' | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data'] return (opDef "Exit" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" description: "The tensor to be made available to the parent frame." type_attr: "T" } output_arg { name: "output" description: "The same tensor as `data`." type_attr: "T" } attr { name: "T" type: "type" } -} -- | Computes exponential of x element-wise. \\(y = e^x\\). exp :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ exp = exp' id exp' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ exp' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Exp" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Inserts a dimension of 1 into a tensor's shape. -- -- Given a tensor `input`, this operation inserts a dimension of 1 at the -- dimension index `dim` of `input`'s shape. The dimension index `dim` starts at -- zero; if you specify a negative number for `dim` it is counted backward from -- the end. 
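--
-- In this binding, a minimal sketch (editorial illustration; assumes
-- `constant` and `scalar` from TensorFlow.Ops):
--
-- > t = constant (Shape [2]) [1, 2 :: Float]
-- > batched = expandDims t (scalar (0 :: Data.Int.Int32))  -- shape [1, 2]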
-- -- This operation is useful if you want to add a batch dimension to a single -- element. For example, if you have a single image of shape `[height, width, -- channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, -- which will make the shape `[1, height, width, channels]`. -- -- Other examples: -- -- ```prettyprint -- # 't' is a tensor of shape [2] -- shape(expand_dims(t, 0)) ==> [1, 2] -- shape(expand_dims(t, 1)) ==> [2, 1] -- shape(expand_dims(t, -1)) ==> [2, 1] -- -- # 't2' is a tensor of shape [2, 3, 5] -- shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5] -- shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5] -- shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1] -- ``` -- -- This operation requires that: -- -- `-1-input.dims() <= dim <= input.dims()` -- -- This operation is related to `squeeze()`, which removes dimensions of -- size 1. expandDims :: forall v'1 v'2 t tdim . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tdim) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 tdim -- ^ __dim__: 0-D (scalar). Specifies the dimension index at which to -- expand the shape of `input`. -> Tensor Build t -- ^ __output__: Contains the same data as `input`, but its shape has an additional -- dimension of size 1 added. expandDims = expandDims' id expandDims' :: forall v'1 v'2 t tdim . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tdim) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tdim -- ^ __dim__: 0-D (scalar). Specifies the dimension index at which to -- expand the shape of `input`. -> Tensor Build t -- ^ __output__: Contains the same data as `input`, but its shape has an additional -- dimension of size 1 added. expandDims' op'options input dim | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs dim] return (opDef "ExpandDims" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tdim" .~ tensorType (undefined :: tdim) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "dim" description: "0-D (scalar). Specifies the dimension index at which to\nexpand the shape of `input`." type_attr: "Tdim" } output_arg { name: "output" description: "Contains the same data as `input`, but its shape has an additional\ndimension of size 1 added." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tdim" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Computes exponential of x - 1 element-wise. -- -- I.e., \\(y = (\exp x) - 1\\). expm1 :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ expm1 = expm1' id expm1' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ expm1' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Expm1" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Extracts a glimpse from the input tensor. 
-- -- Returns a set of windows called glimpses extracted at location -- `offsets` from the input tensor. If the windows only partially -- overlap the inputs, the non-overlapping areas will be filled with -- random noise. -- -- The result is a 4-D tensor of shape `[batch_size, glimpse_height, -- glimpse_width, channels]`. The channels and batch dimensions are the -- same as those of the input tensor. The height and width of the output -- windows are specified in the `size` parameter. -- -- The arguments `normalized` and `centered` control how the windows are built: -- -- * If the coordinates are normalized but not centered, 0.0 and 1.0 -- correspond to the minimum and maximum of each height and width -- dimension. -- * If the coordinates are both normalized and centered, they range from -- -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper -- left corner; the lower right corner is located at (1.0, 1.0), and the -- center is at (0, 0). -- * If the coordinates are not normalized they are interpreted as -- numbers of pixels. extractGlimpse :: Tensor v'1 Float -- ^ __input__: A 4-D float tensor of shape `[batch_size, height, width, channels]`. -> Tensor v'2 Data.Int.Int32 -- ^ __size__: A 1-D tensor of 2 elements containing the size of the glimpses -- to extract. The glimpse height must be specified first, followed -- by the glimpse width. -> Tensor v'3 Float -- ^ __offsets__: A 2-D float tensor of shape `[batch_size, 2]` containing -- the y, x locations of the center of each window. -> Tensor Build Float -- ^ __glimpse__: A tensor representing the glimpses `[batch_size, -- glimpse_height, glimpse_width, channels]`. extractGlimpse = extractGlimpse' id extractGlimpse' :: OpParams -> Tensor v'1 Float -- ^ __input__: A 4-D float tensor of shape `[batch_size, height, width, channels]`. -> Tensor v'2 Data.Int.Int32 -- ^ __size__: A 1-D tensor of 2 elements containing the size of the glimpses -- to extract. The glimpse height must be specified first, followed -- by the glimpse width. -> Tensor v'3 Float -- ^ __offsets__: A 2-D float tensor of shape `[batch_size, 2]` containing -- the y, x locations of the center of each window. -> Tensor Build Float -- ^ __glimpse__: A tensor representing the glimpses `[batch_size, -- glimpse_height, glimpse_width, channels]`. extractGlimpse' op'options input size offsets | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs size, buildInputs offsets] return (opDef "ExtractGlimpse" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "A 4-D float tensor of shape `[batch_size, height, width, channels]`." type: DT_FLOAT } input_arg { name: "size" description: "A 1-D tensor of 2 elements containing the size of the glimpses\nto extract. The glimpse height must be specified first, following\nby the glimpse width." type: DT_INT32 } input_arg { name: "offsets" description: "A 2-D integer tensor of shape `[batch_size, 2]` containing\nthe y, x locations of the center of each window." type: DT_FLOAT } output_arg { name: "glimpse" description: "A tensor representing the glimpses `[batch_size,\nglimpse_height, glimpse_width, channels]`." type: DT_FLOAT } attr { name: "centered" type: "bool" default_value { b: true } description: "indicates if the offset coordinates are centered relative to\nthe image, in which case the (0, 0) offset is relative to the center\nof the input images. If false, the (0,0) offset corresponds to the\nupper left corner of the input images."
} attr { name: "normalized" type: "bool" default_value { b: true } description: "indicates if the offset coordinates are normalized." } attr { name: "uniform_noise" type: "bool" default_value { b: true } description: "indicates if the noise should be generated using a\nuniform distribution or a Gaussian distribution." } -} -- | Extract `patches` from `images` and put them in the "depth" output dimension. extractImagePatches :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __images__: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`. -> Tensor Build t -- ^ __patches__: 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows * -- ksize_cols * depth]` containing image patches with size -- `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. extractImagePatches = extractImagePatches' id extractImagePatches' :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __images__: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`. -> Tensor Build t -- ^ __patches__: 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows * -- ksize_cols * depth]` containing image patches with size -- `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. extractImagePatches' op'options images | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images] return (opDef "ExtractImagePatches" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "images" description: "4-D Tensor with shape `[batch, in_rows, in_cols, depth]`." type_attr: "T" } output_arg { name: "patches" description: "4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *\nksize_cols * depth]` containing image patches with size\n`ksize_rows x ksize_cols x depth` vectorized in the \"depth\" dimension." type_attr: "T" } attr { name: "ksizes" type: "list(int)" description: "The size of the sliding window for each dimension of `images`." has_minimum: true minimum: 4 } attr { name: "strides" type: "list(int)" description: "1-D of length 4. How far the centers of two consecutive patches are in\nthe images. Must be: `[1, stride_rows, stride_cols, 1]`." has_minimum: true minimum: 4 } attr { name: "rates" type: "list(int)" description: "1-D of length 4. Must be: `[1, rate_rows, rate_cols, 1]`. This is the\ninput stride, specifying how far two consecutive patch samples are in the\ninput. Equivalent to extracting patches with\n`patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by\nsubsampling them spatially by a factor of `rates`." has_minimum: true minimum: 4 } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } attr { name: "padding" type: "string" description: "The type of padding algorithm to use.\n\nWe specify the size-related attributes as:\n\n```python\n ksizes = [1, ksize_rows, ksize_cols, 1]\n strides = [1, strides_rows, strides_cols, 1]\n rates = [1, rates_rows, rates_cols, 1]\n```" allowed_values { list { s: "SAME" s: "VALID" } } } -} -- | Compute the 1-dimensional discrete Fourier Transform over the inner-most -- -- dimension of `input`. 
fFT :: Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor. -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most -- dimension of `input` is replaced with its 1D Fourier Transform. -- -- @compatibility(numpy) -- Equivalent to np.fft.fft -- @end_compatibility fFT = fFT' id fFT' :: OpParams -> Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor. -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most -- dimension of `input` is replaced with its 1D Fourier Transform. -- -- @compatibility(numpy) -- Equivalent to np.fft.fft -- @end_compatibility fFT' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "FFT" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "A complex64 tensor." type: DT_COMPLEX64 } output_arg { name: "output" description: "A complex64 tensor of the same shape as `input`. The inner-most\n dimension of `input` is replaced with its 1D Fourier Transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.fft\n@end_compatibility" type: DT_COMPLEX64 } -} -- | Compute the 2-dimensional discrete Fourier Transform over the inner-most -- -- 2 dimensions of `input`. fFT2D :: Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor. -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 2 -- dimensions of `input` are replaced with their 2D Fourier Transform. -- -- @compatibility(numpy) -- Equivalent to np.fft.fft2 -- @end_compatibility fFT2D = fFT2D' id fFT2D' :: OpParams -> Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor. -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 2 -- dimensions of `input` are replaced with their 2D Fourier Transform. -- -- @compatibility(numpy) -- Equivalent to np.fft.fft2 -- @end_compatibility fFT2D' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "FFT2D" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "A complex64 tensor." type: DT_COMPLEX64 } output_arg { name: "output" description: "A complex64 tensor of the same shape as `input`. The inner-most 2\n dimensions of `input` are replaced with their 2D Fourier Transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.fft2\n@end_compatibility" type: DT_COMPLEX64 } -} -- | Compute the 3-dimensional discrete Fourier Transform over the inner-most 3 -- -- dimensions of `input`. fFT3D :: Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor. -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 3 -- dimensions of `input` are replaced with their 3D Fourier Transform. -- -- @compatibility(numpy) -- Equivalent to np.fft.fftn with 3 dimensions. -- @end_compatibility fFT3D = fFT3D' id fFT3D' :: OpParams -> Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor. -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 3 -- dimensions of `input` are replaced with their 3D Fourier Transform. 
-- -- @compatibility(numpy) -- Equivalent to np.fft.fftn with 3 dimensions. -- @end_compatibility fFT3D' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "FFT3D" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "A complex64 tensor." type: DT_COMPLEX64 } output_arg { name: "output" description: "A complex64 tensor of the same shape as `input`. The inner-most 3\n dimensions of `input` are replaced with their 3D Fourier Transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.fftn with 3 dimensions.\n@end_compatibility" type: DT_COMPLEX64 } -} -- | A queue that produces elements in first-in first-out order. fIFOQueue :: forall m' . (MonadBuild m') => [DataType] -- ^ __component_types__: The type of each component in a value. -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the queue. fIFOQueue = fIFOQueue' id fIFOQueue' :: forall m' . (MonadBuild m') => OpParams -> [DataType] -- ^ __component_types__: The type of each component in a value. -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the queue. fIFOQueue' op'options component_types | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "FIFOQueue" & opAttr "component_types" .~ component_types & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" description: "The handle to the queue." type: DT_STRING is_ref: true } attr { name: "component_types" type: "list(type)" description: "The type of each component in a value." has_minimum: true minimum: 1 } attr { name: "shapes" type: "list(shape)" default_value { list { } } description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time." has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions." } -} -- | A queue that produces elements in first-in first-out order. fIFOQueueV2 :: forall m' . (MonadBuild m') => [DataType] -- ^ __component_types__: The type of each component in a value. -> m' (Tensor Value ResourceHandle) -- ^ __handle__: The handle to the queue. fIFOQueueV2 = fIFOQueueV2' id fIFOQueueV2' :: forall m' . (MonadBuild m') => OpParams -> [DataType] -- ^ __component_types__: The type of each component in a value. -> m' (Tensor Value ResourceHandle) -- ^ __handle__: The handle to the queue. fIFOQueueV2' op'options component_types | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "FIFOQueueV2" & opAttr "component_types" .~ component_types & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" description: "The handle to the queue." type: DT_RESOURCE } attr { name: "component_types" type: "list(type)" description: "The type of each component in a value." 
has_minimum: true minimum: 1 } attr { name: "shapes" type: "list(shape)" default_value { list { } } description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time." has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions." } -} -- | Output a fact about factorials. fact :: Tensor Build Data.ByteString.ByteString -- ^ __fact__ fact = fact' id fact' :: OpParams -> Tensor Build Data.ByteString.ByteString -- ^ __fact__ fact' op'options | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] return (opDef "Fact" & op'options & opInputs .~ op'inputs) {- output_arg { name: "fact" type: DT_STRING } -} -- | Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type. -- -- Attributes [min; max] define the clamping range for the 'inputs' data. Op -- divides this range into 255 steps (total of 256 values), then replaces each -- 'inputs' value with the closest of the quantized step values. -- -- Quantization is called fake since the output is still in floating point. fakeQuantWithMinMaxArgs :: Tensor v'1 Float -- ^ __inputs__ -> Tensor Build Float -- ^ __outputs__ fakeQuantWithMinMaxArgs = fakeQuantWithMinMaxArgs' id fakeQuantWithMinMaxArgs' :: OpParams -> Tensor v'1 Float -- ^ __inputs__ -> Tensor Build Float -- ^ __outputs__ fakeQuantWithMinMaxArgs' op'options inputs | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs] return (opDef "FakeQuantWithMinMaxArgs" & op'options & opInputs .~ op'inputs) {- input_arg { name: "inputs" type: DT_FLOAT } output_arg { name: "outputs" type: DT_FLOAT } attr { name: "min" type: "float" default_value { f: -6.0 } } attr { name: "max" type: "float" default_value { f: 6.0 } } -} -- | Compute gradients for a FakeQuantWithMinMaxArgs operation. fakeQuantWithMinMaxArgsGradient :: Tensor v'1 Float -- ^ __gradients__: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation. -> Tensor v'2 Float -- ^ __inputs__: Values passed as inputs to the FakeQuantWithMinMaxArgs operation. -> Tensor Build Float -- ^ __backprops__: Backpropagated gradients below the FakeQuantWithMinMaxArgs operation: -- `gradients * (inputs >= min && inputs <= max)`. fakeQuantWithMinMaxArgsGradient = fakeQuantWithMinMaxArgsGradient' id fakeQuantWithMinMaxArgsGradient' :: OpParams -> Tensor v'1 Float -- ^ __gradients__: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation. -> Tensor v'2 Float -- ^ __inputs__: Values passed as inputs to the FakeQuantWithMinMaxArgs operation. -> Tensor Build Float -- ^ __backprops__: Backpropagated gradients below the FakeQuantWithMinMaxArgs operation: -- `gradients * (inputs >= min && inputs <= max)`. 
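{- A usage sketch for fakeQuantWithMinMaxArgs (editorial illustration, not
   generated code; assumes `vector` from TensorFlow.Ops). Attributes such as
   min/max are set through the OpParams argument of the primed variant, the
   same mechanism used for every op in this module:

     ys  = fakeQuantWithMinMaxArgs (vector [-10, 0.3, 10 :: Float])
     ys' = fakeQuantWithMinMaxArgs'
             ((opAttr "min" .~ (0 :: Float)) . (opAttr "max" .~ (1 :: Float)))
             (vector [-10, 0.3, 10 :: Float])
     -- ys clamps to the default [-6, 6] range; ys' clamps to [0, 1].
-}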
fakeQuantWithMinMaxArgsGradient' op'options gradients inputs | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients, buildInputs inputs] return (opDef "FakeQuantWithMinMaxArgsGradient" & op'options & opInputs .~ op'inputs) {- input_arg { name: "gradients" description: "Backpropagated gradients above the FakeQuantWithMinMaxArgs operation." type: DT_FLOAT } input_arg { name: "inputs" description: "Values passed as inputs to the FakeQuantWithMinMaxArgs operation." type: DT_FLOAT } output_arg { name: "backprops" description: "Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:\n`gradients * (inputs >= min && inputs <= max)`." type: DT_FLOAT } attr { name: "min" type: "float" default_value { f: -6.0 } } attr { name: "max" type: "float" default_value { f: 6.0 } } -} -- | Fake-quantize the 'inputs' tensor of type float via global float scalars `min` -- -- and `max` to 'outputs' tensor of same shape as `inputs`. -- -- [min; max] is the clamping range for the 'inputs' data. Op divides this range -- into 255 steps (total of 256 values), then replaces each 'inputs' value with the -- closest of the quantized step values. -- -- This operation has a gradient and thus allows for training `min` and `max` values. fakeQuantWithMinMaxVars :: Tensor v'1 Float -- ^ __inputs__ -> Tensor v'2 Float -- ^ __min__ -> Tensor v'3 Float -- ^ __max__ -> Tensor Build Float -- ^ __outputs__ fakeQuantWithMinMaxVars = fakeQuantWithMinMaxVars' id fakeQuantWithMinMaxVars' :: OpParams -> Tensor v'1 Float -- ^ __inputs__ -> Tensor v'2 Float -- ^ __min__ -> Tensor v'3 Float -- ^ __max__ -> Tensor Build Float -- ^ __outputs__ fakeQuantWithMinMaxVars' op'options inputs min max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs, buildInputs min, buildInputs max] return (opDef "FakeQuantWithMinMaxVars" & op'options & opInputs .~ op'inputs) {- input_arg { name: "inputs" type: DT_FLOAT } input_arg { name: "min" type: DT_FLOAT } input_arg { name: "max" type: DT_FLOAT } output_arg { name: "outputs" type: DT_FLOAT } -} -- | Compute gradients for a FakeQuantWithMinMaxVars operation. fakeQuantWithMinMaxVarsGradient :: Tensor v'1 Float -- ^ __gradients__: Backpropagated gradients above the FakeQuantWithMinMaxVars operation. -> Tensor v'2 Float -- ^ __inputs__: Values passed as inputs to the FakeQuantWithMinMaxVars operation. -- min, max: Quantization interval, scalar floats. -> Tensor v'3 Float -- ^ __min__ -> Tensor v'4 Float -- ^ __max__ -> (Tensor Build Float, Tensor Build Float, Tensor Build Float) -- ^ (__backprops_wrt_input__, __backprop_wrt_min__, __backprop_wrt_max__) -- -- * __backprops_wrt_input__: Backpropagated gradients w.r.t. inputs: -- `gradients * (inputs >= min && inputs <= max)`. -- -- * __backprop_wrt_min__: Backpropagated gradients w.r.t. min parameter: -- `sum(gradients * (inputs < min))`. -- -- * __backprop_wrt_max__: Backpropagated gradients w.r.t. max parameter: -- `sum(gradients * (inputs > max))`. fakeQuantWithMinMaxVarsGradient = fakeQuantWithMinMaxVarsGradient' id fakeQuantWithMinMaxVarsGradient' :: OpParams -> Tensor v'1 Float -- ^ __gradients__: Backpropagated gradients above the FakeQuantWithMinMaxVars operation. -> Tensor v'2 Float -- ^ __inputs__: Values passed as inputs to the FakeQuantWithMinMaxVars operation. -- min, max: Quantization interval, scalar floats. 
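{- For comparison, fakeQuantWithMinMaxVars takes the range endpoints as
   tensors rather than attributes, which is what makes them trainable. A
   sketch (editorial illustration; assumes `scalar` and `vector` from
   TensorFlow.Ops):

     outs = fakeQuantWithMinMaxVars (vector [0.1, 0.7 :: Float])
                                    (scalar (0 :: Float))
                                    (scalar (1 :: Float))
-}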
-> Tensor v'3 Float -- ^ __min__ -> Tensor v'4 Float -- ^ __max__ -> (Tensor Build Float, Tensor Build Float, Tensor Build Float) -- ^ (__backprops_wrt_input__, __backprop_wrt_min__, __backprop_wrt_max__) -- -- * __backprops_wrt_input__: Backpropagated gradients w.r.t. inputs: -- `gradients * (inputs >= min && inputs <= max)`. -- -- * __backprop_wrt_min__: Backpropagated gradients w.r.t. min parameter: -- `sum(gradients * (inputs < min))`. -- -- * __backprop_wrt_max__: Backpropagated gradients w.r.t. max parameter: -- `sum(gradients * (inputs > max))`. fakeQuantWithMinMaxVarsGradient' op'options gradients inputs min max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients, buildInputs inputs, buildInputs min, buildInputs max] return (opDef "FakeQuantWithMinMaxVarsGradient" & op'options & opInputs .~ op'inputs) {- input_arg { name: "gradients" description: "Backpropagated gradients above the FakeQuantWithMinMaxVars operation." type: DT_FLOAT } input_arg { name: "inputs" description: "Values passed as inputs to the FakeQuantWithMinMaxVars operation.\nmin, max: Quantization interval, scalar floats." type: DT_FLOAT } input_arg { name: "min" type: DT_FLOAT } input_arg { name: "max" type: DT_FLOAT } output_arg { name: "backprops_wrt_input" description: "Backpropagated gradients w.r.t. inputs:\n`gradients * (inputs >= min && inputs <= max)`." type: DT_FLOAT } output_arg { name: "backprop_wrt_min" description: "Backpropagated gradients w.r.t. min parameter:\n`sum(gradients * (inputs < min))`." type: DT_FLOAT } output_arg { name: "backprop_wrt_max" description: "Backpropagated gradients w.r.t. max parameter:\n`sum(gradients * (inputs > max))`." type: DT_FLOAT } -} -- | Fake-quantize the 'inputs' tensor of type float and one of the shapes: `[d]`, -- -- `[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max` of shape `[d]` -- to 'outputs' tensor of same shape as `inputs`. -- -- [min; max] is the clamping range for the 'inputs' data in the corresponding -- depth channel. Op divides this range into 255 steps (total of 256 values), then -- replaces each 'inputs' value with the closest of the quantized step values. -- -- This operation has a gradient and thus allows for training `min` and `max` values. fakeQuantWithMinMaxVarsPerChannel :: Tensor v'1 Float -- ^ __inputs__ -> Tensor v'2 Float -- ^ __min__ -> Tensor v'3 Float -- ^ __max__ -> Tensor Build Float -- ^ __outputs__ fakeQuantWithMinMaxVarsPerChannel = fakeQuantWithMinMaxVarsPerChannel' id fakeQuantWithMinMaxVarsPerChannel' :: OpParams -> Tensor v'1 Float -- ^ __inputs__ -> Tensor v'2 Float -- ^ __min__ -> Tensor v'3 Float -- ^ __max__ -> Tensor Build Float -- ^ __outputs__ fakeQuantWithMinMaxVarsPerChannel' op'options inputs min max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs, buildInputs min, buildInputs max] return (opDef "FakeQuantWithMinMaxVarsPerChannel" & op'options & opInputs .~ op'inputs) {- input_arg { name: "inputs" type: DT_FLOAT } input_arg { name: "min" type: DT_FLOAT } input_arg { name: "max" type: DT_FLOAT } output_arg { name: "outputs" type: DT_FLOAT } -} -- | Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation. fakeQuantWithMinMaxVarsPerChannelGradient :: Tensor v'1 Float -- ^ __gradients__: Backpropagated gradients above the FakeQuantWithMinMaxVars operation, -- shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`. 
-> Tensor v'2 Float -- ^ __inputs__: Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape -- same as `gradients`. -- min, max: Quantization interval, floats of shape `[d]`. -> Tensor v'3 Float -- ^ __min__ -> Tensor v'4 Float -- ^ __max__ -> (Tensor Build Float, Tensor Build Float, Tensor Build Float) -- ^ (__backprops_wrt_input__, __backprop_wrt_min__, __backprop_wrt_max__) -- -- * __backprops_wrt_input__: Backpropagated gradients w.r.t. inputs, shape same as -- `inputs`: -- `gradients * (inputs >= min && inputs <= max)`. -- -- * __backprop_wrt_min__: Backpropagated gradients w.r.t. min parameter, shape `[d]`: -- `sum_per_d(gradients * (inputs < min))`. -- -- * __backprop_wrt_max__: Backpropagated gradients w.r.t. max parameter, shape `[d]`: -- `sum_per_d(gradients * (inputs > max))`. fakeQuantWithMinMaxVarsPerChannelGradient = fakeQuantWithMinMaxVarsPerChannelGradient' id fakeQuantWithMinMaxVarsPerChannelGradient' :: OpParams -> Tensor v'1 Float -- ^ __gradients__: Backpropagated gradients above the FakeQuantWithMinMaxVars operation, -- shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`. -> Tensor v'2 Float -- ^ __inputs__: Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape -- same as `gradients`. -- min, max: Quantization interval, floats of shape `[d]`. -> Tensor v'3 Float -- ^ __min__ -> Tensor v'4 Float -- ^ __max__ -> (Tensor Build Float, Tensor Build Float, Tensor Build Float) -- ^ (__backprops_wrt_input__, __backprop_wrt_min__, __backprop_wrt_max__) -- -- * __backprops_wrt_input__: Backpropagated gradients w.r.t. inputs, shape same as -- `inputs`: -- `gradients * (inputs >= min && inputs <= max)`. -- -- * __backprop_wrt_min__: Backpropagated gradients w.r.t. min parameter, shape `[d]`: -- `sum_per_d(gradients * (inputs < min))`. -- -- * __backprop_wrt_max__: Backpropagated gradients w.r.t. max parameter, shape `[d]`: -- `sum_per_d(gradients * (inputs > max))`. fakeQuantWithMinMaxVarsPerChannelGradient' op'options gradients inputs min max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients, buildInputs inputs, buildInputs min, buildInputs max] return (opDef "FakeQuantWithMinMaxVarsPerChannelGradient" & op'options & opInputs .~ op'inputs) {- input_arg { name: "gradients" description: "Backpropagated gradients above the FakeQuantWithMinMaxVars operation,\nshape one of: `[d]`, `[b, d]`, `[b, h, w, d]`." type: DT_FLOAT } input_arg { name: "inputs" description: "Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape\n same as `gradients`.\nmin, max: Quantization interval, floats of shape `[d]`." type: DT_FLOAT } input_arg { name: "min" type: DT_FLOAT } input_arg { name: "max" type: DT_FLOAT } output_arg { name: "backprops_wrt_input" description: "Backpropagated gradients w.r.t. inputs, shape same as\n`inputs`:\n `gradients * (inputs >= min && inputs <= max)`." type: DT_FLOAT } output_arg { name: "backprop_wrt_min" description: "Backpropagated gradients w.r.t. min parameter, shape `[d]`:\n`sum_per_d(gradients * (inputs < min))`." type: DT_FLOAT } output_arg { name: "backprop_wrt_max" description: "Backpropagated gradients w.r.t. max parameter, shape `[d]`:\n`sum_per_d(gradients * (inputs > max))`." type: DT_FLOAT } -} -- | Deprecated. Do not use. fakeQueue :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __resource__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__ fakeQueue = fakeQueue' id fakeQueue' :: forall v'1 m' . 
(MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__ fakeQueue' op'options resource | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource] buildOp [] (opDef "FakeQueue" & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } output_arg { name: "handle" type: DT_STRING is_ref: true } -} -- | Creates a tensor filled with a scalar value. -- -- This operation creates a tensor of shape `dims` and fills it with `value`. -- -- For example: -- -- ```prettyprint -- # Output tensor has shape [2, 3]. -- fill([2, 3], 9) ==> [[9, 9, 9] -- [9, 9, 9]] -- ``` fill :: forall v'1 v'2 t . (TensorType t) => Tensor v'1 Data.Int.Int32 -- ^ __dims__: 1-D. Represents the shape of the output tensor. -> Tensor v'2 t -- ^ __value__: 0-D (scalar). Value to fill the returned tensor. -- -- @compatibility(numpy) -- Equivalent to np.full -- @end_compatibility -> Tensor Build t -- ^ __output__ fill = fill' id fill' :: forall v'1 v'2 t . (TensorType t) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __dims__: 1-D. Represents the shape of the output tensor. -> Tensor v'2 t -- ^ __value__: 0-D (scalar). Value to fill the returned tensor. -- -- @compatibility(numpy) -- Equivalent to np.full -- @end_compatibility -> Tensor Build t -- ^ __output__ fill' op'options dims value | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs dims, buildInputs value] return (opDef "Fill" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "dims" description: "1-D. Represents the shape of the output tensor." type: DT_INT32 } input_arg { name: "value" description: "0-D (scalar). Value to fill the returned tensor.\n\n@compatibility(numpy)\nEquivalent to np.full\n@end_compatibility" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } -} -- | A Reader that outputs fixed-length records from a file. fixedLengthRecordReader :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __record_bytes__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader. fixedLengthRecordReader = fixedLengthRecordReader' id fixedLengthRecordReader' :: forall m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __record_bytes__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader. fixedLengthRecordReader' op'options record_bytes | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "FixedLengthRecordReader" & opAttr "record_bytes" .~ record_bytes & op'options & opInputs .~ op'inputs) {- output_arg { name: "reader_handle" description: "The handle to reference the Reader." type: DT_STRING is_ref: true } attr { name: "header_bytes" type: "int" default_value { i: 0 } } attr { name: "record_bytes" type: "int" } attr { name: "footer_bytes" type: "int" default_value { i: 0 } } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead." } -} -- | A Reader that outputs fixed-length records from a file. 
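--
-- A construction sketch (editorial illustration; the 16-byte record size is
-- an arbitrary example, and the reader is assumed to be built in a
-- `MonadBuild` context such as a `Session`):
--
-- > reader <- fixedLengthRecordReaderV2 16
-- > -- Pair the reader handle with the reader/queue ops elsewhere in this
-- > -- module to consume records from files.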
fixedLengthRecordReaderV2 :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __record_bytes__ -> m' (Tensor Value ResourceHandle) -- ^ __reader_handle__: The handle to reference the Reader. fixedLengthRecordReaderV2 = fixedLengthRecordReaderV2' id fixedLengthRecordReaderV2' :: forall m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __record_bytes__ -> m' (Tensor Value ResourceHandle) -- ^ __reader_handle__: The handle to reference the Reader. fixedLengthRecordReaderV2' op'options record_bytes | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "FixedLengthRecordReaderV2" & opAttr "record_bytes" .~ record_bytes & op'options & opInputs .~ op'inputs) {- output_arg { name: "reader_handle" description: "The handle to reference the Reader." type: DT_RESOURCE } attr { name: "header_bytes" type: "int" default_value { i: 0 } } attr { name: "record_bytes" type: "int" } attr { name: "footer_bytes" type: "int" default_value { i: 0 } } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead." } -} -- | Generates labels for candidate sampling with a learned unigram distribution. -- -- A unigram sampler could use a fixed unigram distribution read from a -- file or passed in as an in-memory array instead of building up the distribution -- from data on the fly. There is also an option to skew the distribution by -- applying a distortion power to the weights. -- -- The vocabulary file should be in CSV-like format, with the last field -- being the weight associated with the word. -- -- For each batch, this op picks a single set of sampled candidate labels. -- -- The advantages of sampling candidates per-batch are simplicity and the -- possibility of efficient dense matrix multiplication. The disadvantage is that -- the sampled candidates must be chosen independently of the context and of the -- true labels. fixedUnigramCandidateSampler :: Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample per batch. -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context. -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max). -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled -- candidates in a batch are unique. This requires some approximation to -- estimate the post-rejection sampling probabilities. -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the -- IDs of the num_true target_classes in the corresponding original label. -> (Tensor Build Data.Int.Int64, Tensor Build Float, Tensor Build Float) -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__) -- -- * __sampled_candidates__: A vector of length num_sampled, in which each element is -- the ID of a sampled candidate. -- -- * __true_expected_count__: A batch_size * num_true matrix, representing -- the number of times each candidate is expected to occur in a batch -- of sampled candidates. If unique=true, then this is a probability. 
-- -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled -- candidate representing the number of times the candidate is expected -- to occur in a batch of sampled candidates. If unique=true, then this is a -- probability. fixedUnigramCandidateSampler = fixedUnigramCandidateSampler' id fixedUnigramCandidateSampler' :: OpParams -> Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample per batch. -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context. -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max). -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled -- candidates in a batch are unique. This requires some approximation to -- estimate the post-rejection sampling probabilities. -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the -- IDs of the num_true target_classes in the corresponding original label. -> (Tensor Build Data.Int.Int64, Tensor Build Float, Tensor Build Float) -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__) -- -- * __sampled_candidates__: A vector of length num_sampled, in which each element is -- the ID of a sampled candidate. -- -- * __true_expected_count__: A batch_size * num_true matrix, representing -- the number of times each candidate is expected to occur in a batch -- of sampled candidates. If unique=true, then this is a probability. -- -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled -- candidate representing the number of times the candidate is expected -- to occur in a batch of sampled candidates. If unique=true, then this is a -- probability. fixedUnigramCandidateSampler' op'options num_sampled num_true range_max unique true_classes | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs true_classes] return (opDef "FixedUnigramCandidateSampler" & opAttr "num_sampled" .~ num_sampled & opAttr "num_true" .~ num_true & opAttr "range_max" .~ range_max & opAttr "unique" .~ unique & op'options & opInputs .~ op'inputs) {- input_arg { name: "true_classes" description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label." type: DT_INT64 } output_arg { name: "sampled_candidates" description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate." type: DT_INT64 } output_arg { name: "true_expected_count" description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability." type: DT_FLOAT } output_arg { name: "sampled_expected_count" description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability." type: DT_FLOAT } attr { name: "num_true" type: "int" description: "Number of true labels per context." has_minimum: true minimum: 1 } attr { name: "num_sampled" type: "int" description: "Number of candidates to randomly sample per batch." has_minimum: true minimum: 1 } attr { name: "unique" type: "bool" description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. 
This requires some approximation to\nestimate the post-rejection sampling probabilities." } attr { name: "range_max" type: "int" description: "The sampler will sample integers from the interval [0, range_max)." has_minimum: true minimum: 1 } attr { name: "vocab_file" type: "string" default_value { s: "" } description: "Each valid line in this file (which should have a CSV-like format)\ncorresponds to a valid word ID. IDs are in sequential order, starting from\nnum_reserved_ids. The last entry in each line is expected to be a value\ncorresponding to the count or relative probability. Exactly one of vocab_file\nand unigrams needs to be passed to this op." } attr { name: "distortion" type: "float" default_value { f: 1.0 } description: "The distortion is used to skew the unigram probability distribution.\nEach weight is first raised to the distortion\'s power before adding to the\ninternal unigram distribution. As a result, distortion = 1.0 gives regular\nunigram sampling (as defined by the vocab file), and distortion = 0.0 gives\na uniform distribution." } attr { name: "num_reserved_ids" type: "int" default_value { i: 0 } description: "Optionally some reserved IDs can be added in the range [0,\n..., num_reserved_ids) by the users. One use case is that a special unknown\nword token is used as ID 0. These IDs will have a sampling probability of 0." } attr { name: "num_shards" type: "int" default_value { i: 1 } description: "A sampler can be used to sample from a subset of the original range\nin order to speed up the whole computation through parallelism. This parameter\n(together with \'shard\') indicates the number of partitions that are being\nused in the overall computation." has_minimum: true minimum: 1 } attr { name: "shard" type: "int" default_value { i: 0 } description: "A sampler can be used to sample from a subset of the original range\nin order to speed up the whole computation through parallelism. This parameter\n(together with \'num_shards\') indicates the particular partition number of a\nsampler op, when partitioning is being used." has_minimum: true } attr { name: "unigrams" type: "list(float)" default_value { list { } } description: "A list of unigram counts or probabilities, one per ID in sequential\norder. Exactly one of vocab_file and unigrams should be passed to this op." } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "An second seed to avoid seed collision." } -} -- | Returns element-wise largest integer not greater than x. floor :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ floor = floor' id floor' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ floor' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Floor" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Returns x // y element-wise. -- -- *NOTE*: `FloorDiv` supports broadcasting. 
More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) floorDiv :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ floorDiv = floorDiv' id floorDiv' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ floorDiv' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "FloorDiv" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_UINT16 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Returns element-wise remainder of division. When `x < 0` xor `y < 0` is -- -- true, this follows Python semantics in that the result here is consistent -- with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`. -- -- *NOTE*: `FloorMod` supports broadcasting. More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) floorMod :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ floorMod = floorMod' id floorMod' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ floorMod' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "FloorMod" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Performs fractional average pooling on the input. -- -- Fractional average pooling is similar to Fractional max pooling in the pooling -- region generation step. The only difference is that after pooling regions are -- generated, a mean operation is performed instead of a max operation in each -- pooling region. fractionalAvgPool :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => Tensor v'1 t -- ^ __value__: 4-D with shape `[batch, height, width, channels]`. -> (Tensor Build t, Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int64) -- ^ (__output__, __row_pooling_sequence__, __col_pooling_sequence__) -- -- * __output__: output tensor after fractional avg pooling. -- -- * __row_pooling_sequence__: row pooling sequence, needed to calculate gradient. -- -- * __col_pooling_sequence__: column pooling sequence, needed to calculate gradient. fractionalAvgPool = fractionalAvgPool' id fractionalAvgPool' :: forall v'1 t . 
(OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __value__: 4-D with shape `[batch, height, width, channels]`. -> (Tensor Build t, Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int64) -- ^ (__output__, __row_pooling_sequence__, __col_pooling_sequence__) -- -- * __output__: output tensor after fractional avg pooling. -- -- * __row_pooling_sequence__: row pooling sequence, needed to calculate gradient. -- -- * __col_pooling_sequence__: column pooling sequence, needed to calculate gradient. fractionalAvgPool' op'options value | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value] return (opDef "FractionalAvgPool" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "value" description: "4-D with shape `[batch, height, width, channels]`." type_attr: "T" } output_arg { name: "output" description: "output tensor after fractional avg pooling." type_attr: "T" } output_arg { name: "row_pooling_sequence" description: "row pooling sequence, needed to calculate gradient." type: DT_INT64 } output_arg { name: "col_pooling_sequence" description: "column pooling sequence, needed to calculate gradient." type: DT_INT64 } attr { name: "pooling_ratio" type: "list(float)" description: "Pooling ratio for each dimension of `value`, currently only\nsupports row and col dimension and should be >= 1.0. For example, a valid\npooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements\nmust be 1.0 because we don\'t allow pooling on batch and channels\ndimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions\nrespectively." has_minimum: true minimum: 4 } attr { name: "pseudo_random" type: "bool" default_value { b: false } description: "When set to True, generates the pooling sequence in a\npseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin\nGraham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for\ndifference between pseudorandom and random." } attr { name: "overlapping" type: "bool" default_value { b: false } description: "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index 0 1 2 3 4`\n\n`value 20 5 16 3 7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [41/3, 26/3] for fractional avg pooling." } attr { name: "deterministic" type: "bool" default_value { b: false } description: "When set to True, a fixed pooling region will be used when\niterating over a FractionalAvgPool node in the computation graph. Mainly used\nin unit test to make FractionalAvgPool deterministic." } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "An second seed to avoid seed collision." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } -} -- | Computes gradient of the FractionalAvgPool function. -- -- Unlike FractionalMaxPoolGrad, we don't need to find arg_max for -- FractionalAvgPoolGrad, we just need to evenly back-propagate each element of -- out_backprop to those indices that form the same pooling cell. 
Therefore, we -- just need to know the shape of original input tensor, instead of the whole -- tensor. fractionalAvgPoolGrad :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => Tensor v'1 Data.Int.Int64 -- ^ __orig_input_tensor_shape__: Original input tensor shape for `fractional_avg_pool` -> Tensor v'2 t -- ^ __out_backprop__: 4-D with shape `[batch, height, width, channels]`. Gradients -- w.r.t. the output of `fractional_avg_pool`. -> Tensor v'3 Data.Int.Int64 -- ^ __row_pooling_sequence__: row pooling sequence, form pooling region with -- col_pooling_sequence. -> Tensor v'4 Data.Int.Int64 -- ^ __col_pooling_sequence__: column pooling sequence, form pooling region with -- row_pooling sequence. -> Tensor Build t -- ^ __output__: 4-D. Gradients w.r.t. the input of `fractional_avg_pool`. fractionalAvgPoolGrad = fractionalAvgPoolGrad' id fractionalAvgPoolGrad' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __orig_input_tensor_shape__: Original input tensor shape for `fractional_avg_pool` -> Tensor v'2 t -- ^ __out_backprop__: 4-D with shape `[batch, height, width, channels]`. Gradients -- w.r.t. the output of `fractional_avg_pool`. -> Tensor v'3 Data.Int.Int64 -- ^ __row_pooling_sequence__: row pooling sequence, form pooling region with -- col_pooling_sequence. -> Tensor v'4 Data.Int.Int64 -- ^ __col_pooling_sequence__: column pooling sequence, form pooling region with -- row_pooling sequence. -> Tensor Build t -- ^ __output__: 4-D. Gradients w.r.t. the input of `fractional_avg_pool`. fractionalAvgPoolGrad' op'options orig_input_tensor_shape out_backprop row_pooling_sequence col_pooling_sequence | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs orig_input_tensor_shape, buildInputs out_backprop, buildInputs row_pooling_sequence, buildInputs col_pooling_sequence] return (opDef "FractionalAvgPoolGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "orig_input_tensor_shape" description: "Original input tensor shape for `fractional_avg_pool`" type: DT_INT64 } input_arg { name: "out_backprop" description: "4-D with shape `[batch, height, width, channels]`. Gradients\nw.r.t. the output of `fractional_avg_pool`." type_attr: "T" } input_arg { name: "row_pooling_sequence" description: "row pooling sequence, form pooling region with\ncol_pooling_sequence." type: DT_INT64 } input_arg { name: "col_pooling_sequence" description: "column pooling sequence, form pooling region with\nrow_pooling sequence." type: DT_INT64 } output_arg { name: "output" description: "4-D. Gradients w.r.t. the input of `fractional_avg_pool`." type_attr: "T" } attr { name: "overlapping" type: "bool" default_value { b: false } description: "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index 0 1 2 3 4`\n\n`value 20 5 16 3 7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [41/3, 26/3] for fractional avg pooling." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } -} -- | Performs fractional max pooling on the input. -- -- Fractional max pooling is slightly different than regular max pooling. 
In -- regular max pooling, you downsize an input set by taking the maximum value of -- smaller N x N subsections of the set (often 2x2), and try to reduce the set by -- a factor of N, where N is an integer. Fractional max pooling, as you might -- expect from the word "fractional", means that the overall reduction ratio N -- does not have to be an integer. -- -- The sizes of the pooling regions are generated randomly but are fairly uniform. -- For example, let's look at the height dimension, and the constraints on the -- list of rows that will be pool boundaries. -- -- First we define the following: -- -- 1. input_row_length : the number of rows from the input set -- 2. output_row_length : which will be smaller than the input -- 3. alpha = input_row_length / output_row_length : our reduction ratio -- 4. K = floor(alpha) -- 5. row_pooling_sequence : this is the result list of pool boundary rows -- -- Then, row_pooling_sequence should satisfy: -- -- 1. a[0] = 0 : the first value of the sequence is 0 -- 2. a[end] = input_row_length : the last value of the sequence is the size -- 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size -- 4. length(row_pooling_sequence) = output_row_length+1 -- -- For more details on fractional max pooling, see this paper: -- [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) fractionalMaxPool :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => Tensor v'1 t -- ^ __value__: 4-D with shape `[batch, height, width, channels]`. -> (Tensor Build t, Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int64) -- ^ (__output__, __row_pooling_sequence__, __col_pooling_sequence__) -- -- * __output__: output tensor after fractional max pooling. -- -- * __row_pooling_sequence__: row pooling sequence, needed to calculate gradient. -- -- * __col_pooling_sequence__: column pooling sequence, needed to calculate gradient. fractionalMaxPool = fractionalMaxPool' id fractionalMaxPool' :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __value__: 4-D with shape `[batch, height, width, channels]`. -> (Tensor Build t, Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int64) -- ^ (__output__, __row_pooling_sequence__, __col_pooling_sequence__) -- -- * __output__: output tensor after fractional max pooling. -- -- * __row_pooling_sequence__: row pooling sequence, needed to calculate gradient. -- -- * __col_pooling_sequence__: column pooling sequence, needed to calculate gradient. fractionalMaxPool' op'options value | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value] return (opDef "FractionalMaxPool" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "value" description: "4-D with shape `[batch, height, width, channels]`." type_attr: "T" } output_arg { name: "output" description: "output tensor after fractional max pooling." type_attr: "T" } output_arg { name: "row_pooling_sequence" description: "row pooling sequence, needed to calculate gradient." type: DT_INT64 } output_arg { name: "col_pooling_sequence" description: "column pooling sequence, needed to calculate gradient." type: DT_INT64 } attr { name: "pooling_ratio" type: "list(float)" description: "Pooling ratio for each dimension of `value`, currently only\nsupports row and col dimension and should be >= 1.0. For example, a valid\npooling ratio looks like [1.0, 1.44, 1.73, 1.0]. 
The first and last elements\nmust be 1.0 because we don\'t allow pooling on batch and channels\ndimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions\nrespectively." has_minimum: true minimum: 4 } attr { name: "pseudo_random" type: "bool" default_value { b: false } description: "When set to True, generates the pooling sequence in a\npseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin\nGraham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for\ndifference between pseudorandom and random." } attr { name: "overlapping" type: "bool" default_value { b: false } description: "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index 0 1 2 3 4`\n\n`value 20 5 16 3 7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [20, 16] for fractional max pooling." } attr { name: "deterministic" type: "bool" default_value { b: false } description: "When set to True, a fixed pooling region will be used when\niterating over a FractionalMaxPool node in the computation graph. Mainly used\nin unit test to make FractionalMaxPool deterministic." } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "An second seed to avoid seed collision." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } -} -- | Computes gradient of the FractionalMaxPool function. fractionalMaxPoolGrad :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => Tensor v'1 t -- ^ __orig_input__: Original input for `fractional_max_pool` -> Tensor v'2 t -- ^ __orig_output__: Original output for `fractional_max_pool` -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape `[batch, height, width, channels]`. Gradients -- w.r.t. the output of `fractional_max_pool`. -> Tensor v'4 Data.Int.Int64 -- ^ __row_pooling_sequence__: row pooling sequence, form pooling region with -- col_pooling_sequence. -> Tensor v'5 Data.Int.Int64 -- ^ __col_pooling_sequence__: column pooling sequence, form pooling region with -- row_pooling sequence. -> Tensor Build t -- ^ __output__: 4-D. Gradients w.r.t. the input of `fractional_max_pool`. fractionalMaxPoolGrad = fractionalMaxPoolGrad' id fractionalMaxPoolGrad' :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __orig_input__: Original input for `fractional_max_pool` -> Tensor v'2 t -- ^ __orig_output__: Original output for `fractional_max_pool` -> Tensor v'3 t -- ^ __out_backprop__: 4-D with shape `[batch, height, width, channels]`. Gradients -- w.r.t. the output of `fractional_max_pool`. -> Tensor v'4 Data.Int.Int64 -- ^ __row_pooling_sequence__: row pooling sequence, form pooling region with -- col_pooling_sequence. -> Tensor v'5 Data.Int.Int64 -- ^ __col_pooling_sequence__: column pooling sequence, form pooling region with -- row_pooling sequence. -> Tensor Build t -- ^ __output__: 4-D. Gradients w.r.t. the input of `fractional_max_pool`. 
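-- Note: a minimal end-to-end sketch of pairing the forward op with this
-- gradient (hypothetical bindings; assumes an 'Attribute' instance for
-- @[Float]@ so the required @pooling_ratio@ attr can be supplied through
-- 'OpParams'):
--
-- > -- Forward pass: keep the pooling sequences so the gradient can replay them.
-- > (out, rows, cols) =
-- >   fractionalMaxPool'
-- >     (opAttr "pooling_ratio" .~ ([1.0, 1.44, 1.73, 1.0] :: [Float]))
-- >     value
-- > -- Backward pass: reuse the same pooling regions on the incoming gradient.
-- > gradIn = fractionalMaxPoolGrad value out outBackprop rows cols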
fractionalMaxPoolGrad' op'options orig_input orig_output out_backprop row_pooling_sequence col_pooling_sequence | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs orig_input, buildInputs orig_output, buildInputs out_backprop, buildInputs row_pooling_sequence, buildInputs col_pooling_sequence] return (opDef "FractionalMaxPoolGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "orig_input" description: "Original input for `fractional_max_pool`" type_attr: "T" } input_arg { name: "orig_output" description: "Original output for `fractional_max_pool`" type_attr: "T" } input_arg { name: "out_backprop" description: "4-D with shape `[batch, height, width, channels]`. Gradients\nw.r.t. the output of `fractional_max_pool`." type_attr: "T" } input_arg { name: "row_pooling_sequence" description: "row pooling sequence, form pooling region with\ncol_pooling_sequence." type: DT_INT64 } input_arg { name: "col_pooling_sequence" description: "column pooling sequence, form pooling region with\nrow_pooling sequence." type: DT_INT64 } output_arg { name: "output" description: "4-D. Gradients w.r.t. the input of `fractional_max_pool`." type_attr: "T" } attr { name: "overlapping" type: "bool" default_value { b: false } description: "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index 0 1 2 3 4`\n\n`value 20 5 16 3 7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [20, 16] for fractional max pooling." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } -} -- | Batch normalization. -- -- Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". -- The size of 1D Tensors matches the dimension C of the 4D Tensors. fusedBatchNorm :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__: A 4D Tensor for input data. -> Tensor v'2 t -- ^ __scale__: A 1D Tensor for scaling factor, to scale the normalized x. -> Tensor v'3 t -- ^ __offset__: A 1D Tensor for offset, to shift to the normalized x. -> Tensor v'4 t -- ^ __mean__: A 1D Tensor for population mean. Used for inference only; -- must be empty for training. -> Tensor v'5 t -- ^ __variance__: A 1D Tensor for population variance. Used for inference only; -- must be empty for training. -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) -- ^ (__y__, __batch_mean__, __batch_variance__, __reserve_space_1__, __reserve_space_2__) -- -- * __y__: A 4D Tensor for output data. -- -- * __batch_mean__: A 1D Tensor for the computed batch mean, to be used by TensorFlow -- to compute the running mean. -- -- * __batch_variance__: A 1D Tensor for the computed batch variance, to be used by -- TensorFlow to compute the running variance. -- -- * __reserve_space_1__: A 1D Tensor for the computed batch mean, to be reused -- in the gradient computation. -- -- * __reserve_space_2__: A 1D Tensor for the computed batch variance (inverted variance -- in the cuDNN case), to be used in the gradient computation. fusedBatchNorm = fusedBatchNorm' id fusedBatchNorm' :: forall v'1 v'2 v'3 v'4 v'5 t . 
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__: A 4D Tensor for input data. -> Tensor v'2 t -- ^ __scale__: A 1D Tensor for scaling factor, to scale the normalized x. -> Tensor v'3 t -- ^ __offset__: A 1D Tensor for offset, to shift to the normalized x. -> Tensor v'4 t -- ^ __mean__: A 1D Tensor for population mean. Used for inference only; -- must be empty for training. -> Tensor v'5 t -- ^ __variance__: A 1D Tensor for population variance. Used for inference only; -- must be empty for training. -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) -- ^ (__y__, __batch_mean__, __batch_variance__, __reserve_space_1__, __reserve_space_2__) -- -- * __y__: A 4D Tensor for output data. -- -- * __batch_mean__: A 1D Tensor for the computed batch mean, to be used by TensorFlow -- to compute the running mean. -- -- * __batch_variance__: A 1D Tensor for the computed batch variance, to be used by -- TensorFlow to compute the running variance. -- -- * __reserve_space_1__: A 1D Tensor for the computed batch mean, to be reused -- in the gradient computation. -- -- * __reserve_space_2__: A 1D Tensor for the computed batch variance (inverted variance -- in the cuDNN case), to be used in the gradient computation. fusedBatchNorm' op'options x scale offset mean variance | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs scale, buildInputs offset, buildInputs mean, buildInputs variance] return (opDef "FusedBatchNorm" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" description: "A 4D Tensor for input data." type_attr: "T" } input_arg { name: "scale" description: "A 1D Tensor for scaling factor, to scale the normalized x." type_attr: "T" } input_arg { name: "offset" description: "A 1D Tensor for offset, to shift to the normalized x." type_attr: "T" } input_arg { name: "mean" description: "A 1D Tensor for population mean. Used for inference only;\nmust be empty for training." type_attr: "T" } input_arg { name: "variance" description: "A 1D Tensor for population variance. Used for inference only;\nmust be empty for training." type_attr: "T" } output_arg { name: "y" description: "A 4D Tensor for output data." type_attr: "T" } output_arg { name: "batch_mean" description: "A 1D Tensor for the computed batch mean, to be used by TensorFlow\nto compute the running mean." type_attr: "T" } output_arg { name: "batch_variance" description: "A 1D Tensor for the computed batch variance, to be used by\nTensorFlow to compute the running variance." type_attr: "T" } output_arg { name: "reserve_space_1" description: "A 1D Tensor for the computed batch mean, to be reused\nin the gradient computation." type_attr: "T" } output_arg { name: "reserve_space_2" description: "A 1D Tensor for the computed batch variance (inverted variance\nin the cuDNN case), to be used in the gradient computation." type_attr: "T" } attr { name: "T" type: "type" description: "The data type for the elements of input and output Tensors." 
allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "epsilon" type: "float" default_value { f: 1.0e-4 } description: "A small float number added to the variance of x." } attr { name: "data_format" type: "string" default_value { s: "NHWC" } description: "The data format for x and y. Either \"NHWC\" (default) or \"NCHW\"." } attr { name: "is_training" type: "bool" default_value { b: true } description: "A bool value to indicate the operation is for training (default)\nor inference." } -} -- | Gradient for batch normalization. -- -- Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". -- The size of 1D Tensors matches the dimension C of the 4D Tensors. fusedBatchNormGrad :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __y_backprop__: A 4D Tensor for the gradient with respect to y. -> Tensor v'2 t -- ^ __x__: A 4D Tensor for input data. -> Tensor v'3 t -- ^ __scale__: A 1D Tensor for scaling factor, to scale the normalized x. -> Tensor v'4 t -- ^ __reserve_space_1__: A 1D Tensor for the computed batch mean, to be reused -- in the gradient computation. -> Tensor v'5 t -- ^ __reserve_space_2__: A 1D Tensor for the computed batch variance (inverted variance -- in the cuDNN case), to be used in the gradient computation. -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) -- ^ (__x_backprop__, __scale_backprop__, __offset_backprop__, __reserve_space_3__, __reserve_space_4__) -- -- * __x_backprop__: A 4D Tensor for the gradient with respect to x. -- -- * __scale_backprop__: A 1D Tensor for the gradient with respect to scale. -- -- * __offset_backprop__: A 1D Tensor for the gradient with respect to offset. -- -- * __reserve_space_3__: Unused placeholder to match the mean input in FusedBatchNorm. -- -- * __reserve_space_4__: Unused placeholder to match the variance input -- in FusedBatchNorm. fusedBatchNormGrad = fusedBatchNormGrad' id fusedBatchNormGrad' :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __y_backprop__: A 4D Tensor for the gradient with respect to y. -> Tensor v'2 t -- ^ __x__: A 4D Tensor for input data. -> Tensor v'3 t -- ^ __scale__: A 1D Tensor for scaling factor, to scale the normalized x. -> Tensor v'4 t -- ^ __reserve_space_1__: A 1D Tensor for the computed batch mean, to be reused -- in the gradient computation. -> Tensor v'5 t -- ^ __reserve_space_2__: A 1D Tensor for the computed batch variance (inverted variance -- in the cuDNN case), to be used in the gradient computation. -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) -- ^ (__x_backprop__, __scale_backprop__, __offset_backprop__, __reserve_space_3__, __reserve_space_4__) -- -- * __x_backprop__: A 4D Tensor for the gradient with respect to x. -- -- * __scale_backprop__: A 1D Tensor for the gradient with respect to scale. -- -- * __offset_backprop__: A 1D Tensor for the gradient with respect to offset. 
-- -- * __reserve_space_3__: Unused placeholder to match the mean input in FusedBatchNorm. -- -- * __reserve_space_4__: Unused placeholder to match the variance input -- in FusedBatchNorm. fusedBatchNormGrad' op'options y_backprop x scale reserve_space_1 reserve_space_2 | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs y_backprop, buildInputs x, buildInputs scale, buildInputs reserve_space_1, buildInputs reserve_space_2] return (opDef "FusedBatchNormGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "y_backprop" description: "A 4D Tensor for the gradient with respect to y." type_attr: "T" } input_arg { name: "x" description: "A 4D Tensor for input data." type_attr: "T" } input_arg { name: "scale" description: "A 1D Tensor for scaling factor, to scale the normalized x." type_attr: "T" } input_arg { name: "reserve_space_1" description: "A 1D Tensor for the computed batch mean, to be reused\nin the gradient computation." type_attr: "T" } input_arg { name: "reserve_space_2" description: "A 1D Tensor for the computed batch variance (inverted variance\nin the cuDNN case), to be used in the gradient computation." type_attr: "T" } output_arg { name: "x_backprop" description: "A 4D Tensor for the gradient with respect to x." type_attr: "T" } output_arg { name: "scale_backprop" description: "A 1D Tensor for the gradient with respect to scale." type_attr: "T" } output_arg { name: "offset_backprop" description: "A 1D Tensor for the gradient with respect to offset." type_attr: "T" } output_arg { name: "reserve_space_3" description: "Unused placeholder to match the mean input in FusedBatchNorm." type_attr: "T" } output_arg { name: "reserve_space_4" description: "Unused placeholder to match the variance input\nin FusedBatchNorm." type_attr: "T" } attr { name: "T" type: "type" description: "The data type for the elements of input and output Tensors." allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "epsilon" type: "float" default_value { f: 1.0e-4 } description: "A small float number added to the variance of x." } attr { name: "data_format" type: "string" default_value { s: "NHWC" } description: "The data format for y_backprop, x, x_backprop.\nEither \"NHWC\" (default) or \"NCHW\"." } attr { name: "is_training" type: "bool" default_value { b: true } description: "A bool value to indicate the operation is for training (default)\nor inference." } -} -- | Performs a padding as a preprocess during a convolution. -- -- Similar to FusedResizeAndPadConv2d, this op allows for an optimized -- implementation where the spatial padding transformation stage is fused with the -- im2col lookup, but in this case without the bilinear filtering required for -- resizing. Fusing the padding prevents the need to write out the intermediate -- results as whole tensors, reducing memory pressure, and we can get some latency -- gains by merging the transformation calculations. -- The data_format attribute for Conv2D isn't supported by this op, and 'NHWC' -- order is used instead. -- Internally this op uses a single per-graph scratch buffer, which means that it -- will block if multiple versions are being run in parallel. This is because this -- operator is primarily an optimization to minimize memory usage. 
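--
-- A hypothetical invocation sketch (attribute names are taken from the op
-- definition below; the three input tensors and the 'Attribute' instances for
-- string and int-list attributes are assumptions):
--
-- > conv :: Tensor Build Float
-- > conv =
-- >   fusedPadConv2D'
-- >     ( (opAttr "mode" .~ ("REFLECT" :: Data.ByteString.ByteString))
-- >     . (opAttr "strides" .~ ([1, 1, 1, 1] :: [Data.Int.Int64]))
-- >     . (opAttr "padding" .~ ("SAME" :: Data.ByteString.ByteString)) )
-- >     input paddings filter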
fusedPadConv2D :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, in_channels]`. -> Tensor v'2 Data.Int.Int32 -- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of -- rows must be the same as the rank of `input`. -> Tensor v'3 t -- ^ __filter__: 4-D with shape -- `[filter_height, filter_width, in_channels, out_channels]`. -> Tensor Build t -- ^ __output__ fusedPadConv2D = fusedPadConv2D' id fusedPadConv2D' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, in_channels]`. -> Tensor v'2 Data.Int.Int32 -- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of -- rows must be the same as the rank of `input`. -> Tensor v'3 t -- ^ __filter__: 4-D with shape -- `[filter_height, filter_width, in_channels, out_channels]`. -> Tensor Build t -- ^ __output__ fusedPadConv2D' op'options input paddings filter | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs paddings, buildInputs filter] return (opDef "FusedPadConv2D" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "4-D with shape `[batch, in_height, in_width, in_channels]`." type_attr: "T" } input_arg { name: "paddings" description: "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`." type: DT_INT32 } input_arg { name: "filter" description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`." type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "mode" type: "string" allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } } } attr { name: "strides" type: "list(int)" description: "1-D of length 4. The stride of the sliding window for each dimension\nof `input`. Must be in the same order as the dimension specified with format." } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } -} -- | Performs a resize and padding as a preprocess during a convolution. -- -- It's often possible to do spatial transformations more efficiently as part of -- the packing stage of a convolution, so this op allows for an optimized -- implementation where these stages are fused together. This prevents the need to -- write out the intermediate results as whole tensors, reducing memory pressure, -- and we can get some latency gains by merging the transformation calculations. -- The data_format attribute for Conv2D isn't supported by this op, and defaults to -- 'NHWC' order. -- Internally this op uses a single per-graph scratch buffer, which means that it -- will block if multiple versions are being run in parallel. This is because this -- operator is primarily an optimization to minimize memory usage. fusedResizeAndPadConv2D :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, in_channels]`. -> Tensor v'2 Data.Int.Int32 -- ^ __size__: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The -- new size for the images. 
-> Tensor v'3 Data.Int.Int32 -- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of -- rows must be the same as the rank of `input`. -> Tensor v'4 t -- ^ __filter__: 4-D with shape -- `[filter_height, filter_width, in_channels, out_channels]`. -> Tensor Build t -- ^ __output__ fusedResizeAndPadConv2D = fusedResizeAndPadConv2D' id fusedResizeAndPadConv2D' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, in_height, in_width, in_channels]`. -> Tensor v'2 Data.Int.Int32 -- ^ __size__: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The -- new size for the images. -> Tensor v'3 Data.Int.Int32 -- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of -- rows must be the same as the rank of `input`. -> Tensor v'4 t -- ^ __filter__: 4-D with shape -- `[filter_height, filter_width, in_channels, out_channels]`. -> Tensor Build t -- ^ __output__ fusedResizeAndPadConv2D' op'options input size paddings filter | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs size, buildInputs paddings, buildInputs filter] return (opDef "FusedResizeAndPadConv2D" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "4-D with shape `[batch, in_height, in_width, in_channels]`." type_attr: "T" } input_arg { name: "size" description: "A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The\nnew size for the images." type: DT_INT32 } input_arg { name: "paddings" description: "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`." type: DT_INT32 } input_arg { name: "filter" description: "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`." type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "resize_align_corners" type: "bool" default_value { b: false } description: "If true, rescale input by (new_height - 1) / (height - 1),\nwhich exactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension." } attr { name: "mode" type: "string" allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } } } attr { name: "strides" type: "list(int)" description: "1-D of length 4. The stride of the sliding window for each dimension\nof `input`. Must be in the same order as the dimension specified with format." } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } -} -- | Gather slices from `params` according to `indices`. -- -- `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). -- Produces an output tensor with shape `indices.shape + params.shape[1:]` where: -- -- ```python -- # Scalar indices -- output[:, ..., :] = params[indices, :, ... :] -- -- # Vector indices -- output[i, :, ..., :] = params[indices[i], :, ... :] -- -- # Higher rank indices -- output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] -- ``` -- -- If `indices` is a permutation and `len(indices) == params.shape[0]` then -- this operation will permute `params` accordingly. -- --
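-- For example (a hypothetical sketch; @params@ and @indices@ stand for
-- tensors built elsewhere):
--
-- > -- With indices = [2, 0], picks rows 2 and 0 of params, in that order.
-- > picked :: Tensor Build Float
-- > picked = gather (params :: Tensor Build Float)
-- >                 (indices :: Tensor Build Data.Int.Int32)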
gather :: forall v'1 v'2 tparams tindices . (TensorType tparams, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 tparams -- ^ __params__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor Build tparams -- ^ __output__ gather = gather' id gather' :: forall v'1 v'2 tparams tindices . (TensorType tparams, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 tparams -- ^ __params__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor Build tparams -- ^ __output__ gather' op'options params indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs params, buildInputs indices] return (opDef "Gather" & opAttr "Tparams" .~ tensorType (undefined :: tparams) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "params" type_attr: "Tparams" } input_arg { name: "indices" type_attr: "Tindices" } output_arg { name: "output" type_attr: "Tparams" } attr { name: "validate_indices" type: "bool" default_value { b: true } } attr { name: "Tparams" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Gather values or slices from `params` according to `indices`. -- -- `params` is a Tensor of rank `P` and `indices` is a Tensor of rank `Q`. -- -- `indices` must be integer tensor, containing indices into `params`. -- It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. -- -- The innermost dimension of `indices` (with length `K`) corresponds to -- indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th -- dimension of `params`. -- -- Produces an output tensor with shape -- -- ``` -- [d_0, ..., d_{Q-2}, params.shape[K], ..., params.shape[P-1]]. -- ``` -- -- Some examples below. 
-- -- Simple indexing into a matrix: -- -- ```python -- indices = [[0, 0], [1, 1]] -- params = [['a', 'b'], ['c', 'd']] -- output = ['a', 'd'] -- ``` -- -- Slice indexing into a matrix: -- -- ```python -- indices = [[1], [0]] -- params = [['a', 'b'], ['c', 'd']] -- output = [['c', 'd'], ['a', 'b']] -- ``` -- -- Indexing into a 3-tensor: -- -- ```python -- indices = [[1]] -- params = [[['a0', 'b0'], ['c0', 'd0']], -- [['a1', 'b1'], ['c1', 'd1']]] -- output = [[['a1', 'b1'], ['c1', 'd1']]] -- -- -- indices = [[0, 1], [1, 0]] -- params = [[['a0', 'b0'], ['c0', 'd0']], -- [['a1', 'b1'], ['c1', 'd1']]] -- output = [['c0', 'd0'], ['a1', 'b1']] -- -- -- indices = [[0, 0, 1], [1, 0, 1]] -- params = [[['a0', 'b0'], ['c0', 'd0']], -- [['a1', 'b1'], ['c1', 'd1']]] -- output = ['b0', 'b1'] -- ``` -- -- Batched indexing into a matrix: -- -- ```python -- indices = [[[0, 0]], [[0, 1]]] -- params = [['a', 'b'], ['c', 'd']] -- output = [['a'], ['b']] -- ``` -- -- Batched slice indexing into a matrix: -- -- ```python -- indices = [[[1]], [[0]]] -- params = [['a', 'b'], ['c', 'd']] -- output = [[['c', 'd']], [['a', 'b']]] -- ``` -- -- Batched indexing into a 3-tensor: -- -- ```python -- indices = [[[1]], [[0]]] -- params = [[['a0', 'b0'], ['c0', 'd0']], -- [['a1', 'b1'], ['c1', 'd1']]] -- output = [[[['a1', 'b1'], ['c1', 'd1']]], -- [[['a0', 'b0'], ['c0', 'd0']]]] -- -- indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]] -- params = [[['a0', 'b0'], ['c0', 'd0']], -- [['a1', 'b1'], ['c1', 'd1']]] -- output = [[['c0', 'd0'], ['a1', 'b1']], -- [['a0', 'b0'], ['c1', 'd1']]] -- -- -- indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]] -- params = [[['a0', 'b0'], ['c0', 'd0']], -- [['a1', 'b1'], ['c1', 'd1']]] -- output = [['b0', 'b1'], ['d0', 'c1']] -- ``` gatherNd :: forall v'1 v'2 tparams tindices . (TensorType tparams, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 tparams -- ^ __params__: `P-D`. The tensor from which to gather values. -> Tensor v'2 tindices -- ^ __indices__: `Q-D`. Index tensor having shape `[d_0, ..., d_{Q-2}, K]`. -> Tensor Build tparams -- ^ __output__: `(P+Q-K-1)-D`. Values from `params` gathered from indices given by -- `indices`. gatherNd = gatherNd' id gatherNd' :: forall v'1 v'2 tparams tindices . (TensorType tparams, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 tparams -- ^ __params__: `P-D`. The tensor from which to gather values. -> Tensor v'2 tindices -- ^ __indices__: `Q-D`. Index tensor having shape `[d_0, ..., d_{Q-2}, K]`. -> Tensor Build tparams -- ^ __output__: `(P+Q-K-1)-D`. Values from `params` gathered from indices given by -- `indices`. gatherNd' op'options params indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs params, buildInputs indices] return (opDef "GatherNd" & opAttr "Tparams" .~ tensorType (undefined :: tparams) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "params" description: "`P-D`. The tensor from which to gather values." type_attr: "Tparams" } input_arg { name: "indices" description: "`Q-D`. Index tensor having shape `[d_0, ..., d_{Q-2}, K]`." type_attr: "Tindices" } output_arg { name: "output" description: "`(P+Q-K-1)-D`. Values from `params` gathered from indices given by\n`indices`." 
type_attr: "Tparams" } attr { name: "Tparams" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | getSessionHandle :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __value__ -> Tensor Build Data.ByteString.ByteString -- ^ __handle__ getSessionHandle = getSessionHandle' id getSessionHandle' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __value__ -> Tensor Build Data.ByteString.ByteString -- ^ __handle__ getSessionHandle' op'options value | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value] return (opDef "GetSessionHandle" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "value" type_attr: "T" } output_arg { name: "handle" type: DT_STRING } attr { name: "T" type: "type" } -} -- | Store the input tensor in the state of the current session. getSessionHandleV2 :: forall v'1 t m' . (MonadBuild m', TensorType t) => Tensor v'1 t -- ^ __value__: The tensor to be stored. -> m' (Tensor Value ResourceHandle) -- ^ __handle__: The handle for the tensor stored in the session state, represented -- as a ResourceHandle object. getSessionHandleV2 = getSessionHandleV2' id getSessionHandleV2' :: forall v'1 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 t -- ^ __value__: The tensor to be stored. -> m' (Tensor Value ResourceHandle) -- ^ __handle__: The handle for the tensor stored in the session state, represented -- as a ResourceHandle object. getSessionHandleV2' op'options value | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value] buildOp [] (opDef "GetSessionHandleV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "value" description: "The tensor to be stored." type_attr: "T" } output_arg { name: "handle" description: "The handle for the tensor stored in the session state, represented\nas a ResourceHandle object." type: DT_RESOURCE } attr { name: "T" type: "type" } -} -- | Get the value of the tensor specified by its handle. getSessionTensor :: forall v'1 dtype . (TensorType dtype) => Tensor v'1 Data.ByteString.ByteString -- ^ __handle__: The handle for a tensor stored in the session state. -> Tensor Build dtype -- ^ __value__: The tensor for the given handle. getSessionTensor = getSessionTensor' id getSessionTensor' :: forall v'1 dtype . (TensorType dtype) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __handle__: The handle for a tensor stored in the session state. -> Tensor Build dtype -- ^ __value__: The tensor for the given handle. getSessionTensor' op'options handle | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] return (opDef "GetSessionTensor" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle for a tensor stored in the session state." type: DT_STRING } output_arg { name: "value" description: "The tensor for the given handle." type_attr: "dtype" } attr { name: "dtype" type: "type" description: "The type of the output value." } -} -- | Returns the truth value of (x > y) element-wise. -- -- *NOTE*: `Greater` supports broadcasting. More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) greater :: forall v'1 v'2 t . 
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ greater = greater' id greater' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ greater' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "Greater" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } -} -- | Returns the truth value of (x >= y) element-wise. -- -- *NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) greaterEqual :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ greaterEqual = greaterEqual' id greaterEqual' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ greaterEqual' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "GreaterEqual" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } -} -- | Convert one or more images from HSV to RGB. -- -- Outputs a tensor of the same shape as the `images` tensor, containing the RGB -- value of the pixels. The output is only well defined if the value in `images` -- are in `[0,1]`. -- -- See `rgb_to_hsv` for a description of the HSV encoding. hSVToRGB :: forall v'1 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __images__: 1-D or higher rank. HSV data to convert. Last dimension must be size 3. -> Tensor Build t -- ^ __output__: `images` converted to RGB. hSVToRGB = hSVToRGB' id hSVToRGB' :: forall v'1 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __images__: 1-D or higher rank. HSV data to convert. Last dimension must be size 3. -> Tensor Build t -- ^ __output__: `images` converted to RGB. hSVToRGB' op'options images | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images] return (opDef "HSVToRGB" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "images" description: "1-D or higher rank. HSV data to convert. Last dimension must be size 3." 
type_attr: "T" } output_arg { name: "output" description: "`images` converted to RGB." type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Creates a non-initialized hash table. -- -- This op creates a hash table, specifying the type of its keys and values. -- Before using the table you will have to initialize it. After initialization the -- table will be immutable. hashTable :: forall m' . (MonadBuild m') => DataType -- ^ __key_dtype__: Type of the table keys. -> DataType -- ^ __value_dtype__: Type of the table values. -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __table_handle__: Handle to a table. hashTable = hashTable' id hashTable' :: forall m' . (MonadBuild m') => OpParams -> DataType -- ^ __key_dtype__: Type of the table keys. -> DataType -- ^ __value_dtype__: Type of the table values. -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __table_handle__: Handle to a table. hashTable' op'options key_dtype value_dtype | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "HashTable" & opAttr "key_dtype" .~ key_dtype & opAttr "value_dtype" .~ value_dtype & op'options & opInputs .~ op'inputs) {- output_arg { name: "table_handle" description: "Handle to a table." type: DT_STRING is_ref: true } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this table is shared under the given name across\nmultiple sessions." } attr { name: "use_node_name_sharing" type: "bool" default_value { b: false } description: "If true and shared_name is empty, the table is shared\nusing the node name." } attr { name: "key_dtype" type: "type" description: "Type of the table keys." } attr { name: "value_dtype" type: "type" description: "Type of the table values." } -} -- | Outputs a `Summary` protocol buffer with a histogram. -- -- The generated -- [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) -- has one summary value containing a histogram for `values`. -- -- This op reports an `InvalidArgument` error if any value is not finite. histogramSummary :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 Data.ByteString.ByteString -- ^ __tag__: Scalar. Tag to use for the `Summary.Value`. -> Tensor v'2 t -- ^ __values__: Any shape. Values to use to build the histogram. -> Tensor Build Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer. histogramSummary = histogramSummary' id histogramSummary' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __tag__: Scalar. Tag to use for the `Summary.Value`. -> Tensor v'2 t -- ^ __values__: Any shape. Values to use to build the histogram. -> Tensor Build Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer. 
histogramSummary' op'options tag values | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tag, buildInputs values] return (opDef "HistogramSummary" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tag" description: "Scalar. Tag to use for the `Summary.Value`." type: DT_STRING } input_arg { name: "values" description: "Any shape. Values to use to build the histogram." type_attr: "T" } output_arg { name: "summary" description: "Scalar. Serialized `Summary` protocol buffer." type: DT_STRING } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } -} -- | Compute the inverse 1-dimensional discrete Fourier Transform over the inner-most -- -- dimension of `input`. iFFT :: Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor. -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most -- dimension of `input` is replaced with its inverse 1D Fourier Transform. -- -- @compatibility(numpy) -- Equivalent to np.fft.ifft -- @end_compatibility iFFT = iFFT' id iFFT' :: OpParams -> Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor. -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most -- dimension of `input` is replaced with its inverse 1D Fourier Transform. -- -- @compatibility(numpy) -- Equivalent to np.fft.ifft -- @end_compatibility iFFT' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "IFFT" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "A complex64 tensor." type: DT_COMPLEX64 } output_arg { name: "output" description: "A complex64 tensor of the same shape as `input`. The inner-most\n dimension of `input` is replaced with its inverse 1D Fourier Transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.ifft\n@end_compatibility" type: DT_COMPLEX64 } -} -- | Compute the inverse 2-dimensional discrete Fourier Transform over the inner-most -- -- 2 dimensions of `input`. iFFT2D :: Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor. -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 2 -- dimensions of `input` are replaced with their inverse 2D Fourier Transform. -- -- @compatibility(numpy) -- Equivalent to np.fft.ifft2 -- @end_compatibility iFFT2D = iFFT2D' id iFFT2D' :: OpParams -> Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor. -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 2 -- dimensions of `input` are replaced with their inverse 2D Fourier Transform. -- -- @compatibility(numpy) -- Equivalent to np.fft.ifft2 -- @end_compatibility iFFT2D' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "IFFT2D" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "A complex64 tensor." type: DT_COMPLEX64 } output_arg { name: "output" description: "A complex64 tensor of the same shape as `input`. 
The inner-most 2\n dimensions of `input` are replaced with their inverse 2D Fourier Transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.ifft2\n@end_compatibility" type: DT_COMPLEX64 } -} -- | Compute the inverse 3-dimensional discrete Fourier Transform over the inner-most -- -- 3 dimensions of `input`. iFFT3D :: Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor. -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 3 -- dimensions of `input` are replaced with their inverse 3D Fourier Transform. -- -- @compatibility(numpy) -- Equivalent to np.fft.ifftn with 3 dimensions. -- @end_compatibility iFFT3D = iFFT3D' id iFFT3D' :: OpParams -> Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor. -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same shape as `input`. The inner-most 3 -- dimensions of `input` are replaced with their inverse 3D Fourier Transform. -- -- @compatibility(numpy) -- Equivalent to np.fft.ifftn with 3 dimensions. -- @end_compatibility iFFT3D' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "IFFT3D" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "A complex64 tensor." type: DT_COMPLEX64 } output_arg { name: "output" description: "A complex64 tensor of the same shape as `input`. The inner-most 3\n dimensions of `input` are replaced with their inverse 3D Fourier Transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.ifftn with 3 dimensions.\n@end_compatibility" type: DT_COMPLEX64 } -} -- | Compute the inverse 1-dimensional discrete Fourier Transform of a real-valued -- -- signal over the inner-most dimension of `input`. -- -- The inner-most dimension of `input` is assumed to be the result of `RFFT`: the -- `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If -- `fft_length` is not provided, it is computed from the size of the inner-most -- dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to -- compute `input` is odd, it should be provided since it cannot be inferred -- properly. iRFFT :: Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor. -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__: An int32 tensor of shape [1]. The FFT length. -> Tensor Build Float -- ^ __output__: A float32 tensor of the same rank as `input`. The inner-most -- dimension of `input` is replaced with the `fft_length` samples of its inverse -- 1D Fourier Transform. -- -- @compatibility(numpy) -- Equivalent to np.fft.irfft -- @end_compatibility iRFFT = iRFFT' id iRFFT' :: OpParams -> Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor. -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__: An int32 tensor of shape [1]. The FFT length. -> Tensor Build Float -- ^ __output__: A float32 tensor of the same rank as `input`. The inner-most -- dimension of `input` is replaced with the `fft_length` samples of its inverse -- 1D Fourier Transform. -- -- @compatibility(numpy) -- Equivalent to np.fft.irfft -- @end_compatibility iRFFT' op'options input fft_length | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs fft_length] return (opDef "IRFFT" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "A complex64 tensor." 
type: DT_COMPLEX64 } input_arg { name: "fft_length" description: "An int32 tensor of shape [1]. The FFT length." type: DT_INT32 } output_arg { name: "output" description: "A float32 tensor of the same rank as `input`. The inner-most\n dimension of `input` is replaced with the `fft_length` samples of its inverse\n 1D Fourier Transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.irfft\n@end_compatibility" type: DT_FLOAT } -} -- | Compute the inverse 2-dimensional discrete Fourier Transform of a real-valued -- -- signal over the inner-most 2 dimensions of `input`. -- -- The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`: -- The inner-most dimension contains the `fft_length / 2 + 1` unique components of -- the DFT of a real-valued signal. If `fft_length` is not provided, it is computed -- from the size of the inner-most 2 dimensions of `input`. If the FFT length used -- to compute `input` is odd, it should be provided since it cannot be inferred -- properly. iRFFT2D :: Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor. -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__: An int32 tensor of shape [2]. The FFT length for each dimension. -> Tensor Build Float -- ^ __output__: A float32 tensor of the same rank as `input`. The inner-most 2 -- dimensions of `input` are replaced with the `fft_length` samples of their -- inverse 2D Fourier Transform. -- -- @compatibility(numpy) -- Equivalent to np.fft.irfft2 -- @end_compatibility iRFFT2D = iRFFT2D' id iRFFT2D' :: OpParams -> Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor. -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__: An int32 tensor of shape [2]. The FFT length for each dimension. -> Tensor Build Float -- ^ __output__: A float32 tensor of the same rank as `input`. The inner-most 2 -- dimensions of `input` are replaced with the `fft_length` samples of their -- inverse 2D Fourier Transform. -- -- @compatibility(numpy) -- Equivalent to np.fft.irfft2 -- @end_compatibility iRFFT2D' op'options input fft_length | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs fft_length] return (opDef "IRFFT2D" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "A complex64 tensor." type: DT_COMPLEX64 } input_arg { name: "fft_length" description: "An int32 tensor of shape [2]. The FFT length for each dimension." type: DT_INT32 } output_arg { name: "output" description: "A float32 tensor of the same rank as `input`. The inner-most 2\n dimensions of `input` are replaced with the `fft_length` samples of their\n inverse 2D Fourier Transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.irfft2\n@end_compatibility" type: DT_FLOAT } -} -- | Compute the inverse 3-dimensional discrete Fourier Transform of a real-valued -- -- signal over the inner-most 3 dimensions of `input`. -- -- The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`: -- The inner-most dimension contains the `fft_length / 2 + 1` unique components of -- the DFT of a real-valued signal. If `fft_length` is not provided, it is computed -- from the size of the inner-most 3 dimensions of `input`. If the FFT length used -- to compute `input` is odd, it should be provided since it cannot be inferred -- properly. iRFFT3D :: Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor. -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__: An int32 tensor of shape [3]. The FFT length for each dimension. 
-> Tensor Build Float -- ^ __output__: A float32 tensor of the same rank as `input`. The inner-most 3 -- dimensions of `input` are replaced with the `fft_length` samples of their -- inverse 3D real Fourier Transform. -- -- @compatibility(numpy) -- Equivalent to np.irfftn with 3 dimensions. -- @end_compatibility iRFFT3D = iRFFT3D' id iRFFT3D' :: OpParams -> Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__: A complex64 tensor. -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__: An int32 tensor of shape [3]. The FFT length for each dimension. -> Tensor Build Float -- ^ __output__: A float32 tensor of the same rank as `input`. The inner-most 3 -- dimensions of `input` are replaced with the `fft_length` samples of their -- inverse 3D real Fourier Transform. -- -- @compatibility(numpy) -- Equivalent to np.irfftn with 3 dimensions. -- @end_compatibility iRFFT3D' op'options input fft_length | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs fft_length] return (opDef "IRFFT3D" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "A complex64 tensor." type: DT_COMPLEX64 } input_arg { name: "fft_length" description: "An int32 tensor of shape [3]. The FFT length for each dimension." type: DT_INT32 } output_arg { name: "output" description: "A float32 tensor of the same rank as `input`. The inner-most 3\n dimensions of `input` are replaced with the `fft_length` samples of their\n inverse 3D real Fourier Transform.\n\n@compatibility(numpy)\nEquivalent to np.irfftn with 3 dimensions.\n@end_compatibility" type: DT_FLOAT } -} -- | Return a tensor with the same shape and contents as the input tensor or value. identity :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ identity = identity' id identity' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ identity' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Identity" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } -} -- | A Reader that outputs the queued work as both the key and value. -- -- To use, enqueue strings in a Queue. ReaderRead will take the front -- work string and output (work, work). identityReader :: forall m' . (MonadBuild m') => m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader. identityReader = identityReader' id identityReader' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader. identityReader' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "IdentityReader" & op'options & opInputs .~ op'inputs) {- output_arg { name: "reader_handle" description: "The handle to reference the Reader." type: DT_STRING is_ref: true } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. 
Otherwise, the node name is used instead." } -} -- | A Reader that outputs the queued work as both the key and value. -- -- To use, enqueue strings in a Queue. ReaderRead will take the front -- work string and output (work, work). identityReaderV2 :: forall m' . (MonadBuild m') => m' (Tensor Value ResourceHandle) -- ^ __reader_handle__: The handle to reference the Reader. identityReaderV2 = identityReaderV2' id identityReaderV2' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Value ResourceHandle) -- ^ __reader_handle__: The handle to reference the Reader. identityReaderV2' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "IdentityReaderV2" & op'options & opInputs .~ op'inputs) {- output_arg { name: "reader_handle" description: "The handle to reference the Reader." type: DT_RESOURCE } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead." } -} -- | Compute the lower regularized incomplete Gamma function `P(a, x)`. -- -- The lower regularized incomplete Gamma function is defined as: -- -- ``` -- P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x) -- ``` -- where -- ``` -- gamma(a, x) = int_{0}^{x} t^{a-1} exp(-t) dt -- ``` -- is the lower incomplete Gamma function. -- -- Note, above `Q(a, x)` (`Igammac`) is the upper regularized incomplete -- Gamma function. igamma :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __x__ -> Tensor Build t -- ^ __z__ igamma = igamma' id igamma' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __x__ -> Tensor Build t -- ^ __z__ igamma' op'options a x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a, buildInputs x] return (opDef "Igamma" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a" type_attr: "T" } input_arg { name: "x" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Compute the upper regularized incomplete Gamma function `Q(a, x)`. -- -- The upper regularized incomplete Gamma function is defined as: -- -- ``` -- Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x) -- ``` -- where -- ``` -- Gamma(a, x) = int_{x}^{\infty} t^{a-1} exp(-t) dt -- ``` -- is the upper incomplete Gamma function. -- -- Note, above `P(a, x)` (`Igamma`) is the lower regularized incomplete -- Gamma function. igammac :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __x__ -> Tensor Build t -- ^ __z__ igammac = igammac' id igammac' :: forall v'1 v'2 t .
(OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __x__ -> Tensor Build t -- ^ __z__ igammac' op'options a x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a, buildInputs x] return (opDef "Igammac" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a" type_attr: "T" } input_arg { name: "x" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Returns the imaginary part of a complex number. -- -- Given a tensor `input` of complex numbers, this operation returns a tensor of -- type `float` that is the imaginary part of each element in `input`. All -- elements in `input` must be complex numbers of the form \\(a + bj\\), where *a* -- is the real part and *b* is the imaginary part returned by this operation. -- -- For example: -- -- ``` -- # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] -- tf.imag(input) ==> [4.75, 5.75] -- ``` imag :: forall v'1 t tout . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] t, OneOf '[Double, Float] tout) => Tensor v'1 t -- ^ __input__ -> Tensor Build tout -- ^ __output__ imag = imag' id imag' :: forall v'1 t tout . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] t, OneOf '[Double, Float] tout) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build tout -- ^ __output__ imag' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Imag" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tout" .~ tensorType (undefined :: tout) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "Tout" } attr { name: "T" type: "type" default_value { type: DT_COMPLEX64 } allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } } } attr { name: "Tout" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Outputs a `Summary` protocol buffer with images. -- -- The summary has up to `max_images` summary values containing images. The -- images are built from `tensor` which must be 4-D with shape `[batch_size, -- height, width, channels]` and where `channels` can be: -- -- * 1: `tensor` is interpreted as Grayscale. -- * 3: `tensor` is interpreted as RGB. -- * 4: `tensor` is interpreted as RGBA. -- -- The images have the same number of channels as the input tensor. For float -- input, the values are normalized one image at a time to fit in the range -- `[0, 255]`. `uint8` values are unchanged. The op uses two different -- normalization algorithms: -- -- * If the input values are all positive, they are rescaled so the largest one -- is 255. -- -- * If any input value is negative, the values are shifted so input value 0.0 -- is at 127. They are then rescaled so that either the smallest value is 0, -- or the largest one is 255. -- -- The `tag` argument is a scalar `Tensor` of type `string`. It is used to -- build the `tag` of the summary values: -- -- * If `max_images` is 1, the summary value tag is '*tag*/image'. -- * If `max_images` is greater than 1, the summary value tags are -- generated sequentially as '*tag*/image/0', '*tag*/image/1', etc. -- -- The `bad_color` argument is the color to use in the generated images for -- non-finite input values. 
It is a `uint8` 1-D tensor of length `channels`. -- Each element must be in the range `[0, 255]` (it represents the value of a -- pixel in the output image). Non-finite values in the input tensor are -- replaced by this tensor in the output image. The default value is the color -- red. imageSummary :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Data.Word.Word8, Float] t) => Tensor v'1 Data.ByteString.ByteString -- ^ __tag__: Scalar. Used to build the `tag` attribute of the summary values. -> Tensor v'2 t -- ^ __tensor__: 4-D of shape `[batch_size, height, width, channels]` where -- `channels` is 1, 3, or 4. -> Tensor Build Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer. imageSummary = imageSummary' id imageSummary' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Data.Word.Word8, Float] t) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __tag__: Scalar. Used to build the `tag` attribute of the summary values. -> Tensor v'2 t -- ^ __tensor__: 4-D of shape `[batch_size, height, width, channels]` where -- `channels` is 1, 3, or 4. -> Tensor Build Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer. imageSummary' op'options tag tensor | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tag, buildInputs tensor] return (opDef "ImageSummary" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tag" description: "Scalar. Used to build the `tag` attribute of the summary values." type: DT_STRING } input_arg { name: "tensor" description: "4-D of shape `[batch_size, height, width, channels]` where\n`channels` is 1, 3, or 4." type_attr: "T" } output_arg { name: "summary" description: "Scalar. Serialized `Summary` protocol buffer." type: DT_STRING } attr { name: "max_images" type: "int" default_value { i: 3 } description: "Max number of batch elements to generate images for." has_minimum: true minimum: 1 } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_UINT8 type: DT_FLOAT type: DT_HALF } } } attr { name: "bad_color" type: "tensor" default_value { tensor { dtype: DT_UINT8 tensor_shape { dim { size: 4 } } int_val: 255 int_val: 0 int_val: 0 int_val: 255 } } description: "Color to use for pixels with non-finite values." } -} -- | Returns an immutable tensor from a memory region. -- -- The current implementation memmaps the tensor from a file. immutableConst :: forall dtype . (TensorType dtype) => Shape -- ^ __shape__: Shape of the returned tensor. -> Tensor Build dtype -- ^ __tensor__ immutableConst = immutableConst' id immutableConst' :: forall dtype . (TensorType dtype) => OpParams -> Shape -- ^ __shape__: Shape of the returned tensor. -> Tensor Build dtype -- ^ __tensor__ immutableConst' op'options shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] return (opDef "ImmutableConst" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- output_arg { name: "tensor" type_attr: "dtype" } attr { name: "dtype" type: "type" description: "Type of the returned tensor." } attr { name: "shape" type: "shape" description: "Shape of the returned tensor." } attr { name: "memory_region_name" type: "string" description: "Name of readonly memory region used by the tensor, see\nNewReadOnlyMemoryRegionFromFile in tensorflow::Env."
} -} -- | Says whether the targets are in the top `K` predictions. -- -- This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the -- prediction for the target class is among the top `k` predictions among -- all predictions for example `i`. Note that the behavior of `InTopK` differs -- from the `TopK` op in its handling of ties; if multiple classes have the -- same prediction value and straddle the top-`k` boundary, all of those -- classes are considered to be in the top `k`. -- -- More formally, let -- -- \\(predictions_i\\) be the predictions for all classes for example `i`, -- \\(targets_i\\) be the target class for example `i`, -- \\(out_i\\) be the output for example `i`, -- -- $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ inTopK :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Data.Int.Int64 -- ^ __k__: Number of top elements to look at for computing precision. -> Tensor v'1 Float -- ^ __predictions__: A `batch_size` x `classes` tensor. -> Tensor v'2 t -- ^ __targets__: A `batch_size` vector of class ids. -> Tensor Build Bool -- ^ __precision__: Computed Precision at `k` as a `bool Tensor`. inTopK = inTopK' id inTopK' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Data.Int.Int64 -- ^ __k__: Number of top elements to look at for computing precision. -> Tensor v'1 Float -- ^ __predictions__: A `batch_size` x `classes` tensor. -> Tensor v'2 t -- ^ __targets__: A `batch_size` vector of class ids. -> Tensor Build Bool -- ^ __precision__: Computed Precision at `k` as a `bool Tensor`. inTopK' op'options k predictions targets | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs predictions, buildInputs targets] return (opDef "InTopK" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "k" .~ k & op'options & opInputs .~ op'inputs) {- input_arg { name: "predictions" description: "A `batch_size` x `classes` tensor." type: DT_FLOAT } input_arg { name: "targets" description: "A `batch_size` vector of class ids." type_attr: "T" } output_arg { name: "precision" description: "Computed Precision at `k` as a `bool Tensor`." type: DT_BOOL } attr { name: "k" type: "int" description: "Number of top elements to look at for computing precision." } attr { name: "T" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Table initializer that takes two tensors for keys and values respectively. initializeTable :: forall v'2 v'3 tkey tval m' . (MonadBuild m', TensorType tkey, TensorType tval) => Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to a table which will be initialized. -> Tensor v'2 tkey -- ^ __keys__: Keys of type Tkey. -> Tensor v'3 tval -- ^ __values__: Values of type Tval. -> m' (ControlNode) initializeTable = initializeTable' id initializeTable' :: forall v'2 v'3 tkey tval m' . (MonadBuild m', TensorType tkey, TensorType tval) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to a table which will be initialized. -> Tensor v'2 tkey -- ^ __keys__: Keys of type Tkey. -> Tensor v'3 tval -- ^ __values__: Values of type Tval. 
-> m' (ControlNode) initializeTable' op'options table_handle keys values | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle, buildInputs keys, buildInputs values] buildOp [] (opDef "InitializeTable" & opAttr "Tkey" .~ tensorType (undefined :: tkey) & opAttr "Tval" .~ tensorType (undefined :: tval) & op'options & opInputs .~ op'inputs) {- input_arg { name: "table_handle" description: "Handle to a table which will be initialized." type: DT_STRING is_ref: true } input_arg { name: "keys" description: "Keys of type Tkey." type_attr: "Tkey" } input_arg { name: "values" description: "Values of type Tval." type_attr: "Tval" } attr { name: "Tkey" type: "type" } attr { name: "Tval" type: "type" } -} -- | Initializes a table from a text file. -- -- It inserts one key-value pair into the table for each line of the file. -- The key and value are extracted from the whole line content, from elements of -- the line split on `delimiter`, or from the line number (starting from zero). -- Where to extract the key and value from a line is specified by `key_index` and -- `value_index`. -- -- - A value of -1 means use the line number (starting from zero); expects `int64`. -- - A value of -2 means use the whole line content; expects `string`. -- - A value >= 0 means use the index (starting at zero) of the split line based -- on `delimiter`. initializeTableFromTextFile :: forall v'2 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __key_index__: Column index in a line to get the table `key` values from. -> Data.Int.Int64 -- ^ __value_index__: Column index that represents information of a line to get the table -- `value` values from. -> Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to a table which will be initialized. -> Tensor v'2 Data.ByteString.ByteString -- ^ __filename__: Filename of a vocabulary text file. -> m' (ControlNode) initializeTableFromTextFile = initializeTableFromTextFile' id initializeTableFromTextFile' :: forall v'2 m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __key_index__: Column index in a line to get the table `key` values from. -> Data.Int.Int64 -- ^ __value_index__: Column index that represents information of a line to get the table -- `value` values from. -> Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to a table which will be initialized. -> Tensor v'2 Data.ByteString.ByteString -- ^ __filename__: Filename of a vocabulary text file. -> m' (ControlNode) initializeTableFromTextFile' op'options key_index value_index table_handle filename | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle, buildInputs filename] buildOp [] (opDef "InitializeTableFromTextFile" & opAttr "key_index" .~ key_index & opAttr "value_index" .~ value_index & op'options & opInputs .~ op'inputs) {- input_arg { name: "table_handle" description: "Handle to a table which will be initialized." type: DT_STRING is_ref: true } input_arg { name: "filename" description: "Filename of a vocabulary text file." type: DT_STRING } attr { name: "key_index" type: "int" description: "Column index in a line to get the table `key` values from." has_minimum: true minimum: -2 } attr { name: "value_index" type: "int" description: "Column index that represents information of a line to get the table\n`value` values from." has_minimum: true minimum: -2 } attr { name: "vocab_size" type: "int" default_value { i: -1 } description: "Number of elements of the file, use -1 if unknown."
has_minimum: true minimum: -1 } attr { name: "delimiter" type: "string" default_value { s: "\t" } description: "Delimiter to separate fields in a line." } -} -- | Computes the reciprocal of x element-wise. -- -- I.e., \\(y = 1 / x\\). inv :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ inv = inv' id inv' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ inv' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Inv" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Computes the gradient for the inverse of `x` wrt its input. -- -- Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy` -- is the corresponding input gradient. invGrad :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ invGrad = invGrad' id invGrad' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ invGrad' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "InvGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Computes the inverse permutation of a tensor. -- -- This operation computes the inverse of an index permutation. It takes a 1-D -- integer tensor `x`, which represents the indices of a zero-based array, and -- swaps each value with its index position. In other words, for an output tensor -- `y` and an input tensor `x`, this operation computes the following: -- -- `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]` -- -- The values must include 0. There can be no duplicate values or negative values. -- -- For example: -- -- ```prettyprint -- # tensor `x` is [3, 4, 0, 2, 1] -- invert_permutation(x) ==> [2, 4, 3, 0, 1] -- ``` invertPermutation :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Tensor v'1 t -- ^ __x__: 1-D. -> Tensor Build t -- ^ __y__: 1-D. invertPermutation = invertPermutation' id invertPermutation' :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Tensor v'1 t -- ^ __x__: 1-D. -> Tensor Build t -- ^ __y__: 1-D. 
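-- A minimal usage sketch for 'invertPermutation' (again an editorial example,
-- not generated code: it assumes 'runSession' and 'run' from
-- TensorFlow.Session, 'vector' from TensorFlow.Ops, and V as a qualified
-- import of Data.Vector):
--
-- > -- Inverting the permutation [3, 4, 0, 2, 1] yields [2, 4, 3, 0, 1].
-- > invertExample :: IO (V.Vector Data.Int.Int32)
-- > invertExample = runSession $
-- >     run (invertPermutation (vector [3, 4, 0, 2, 1 :: Data.Int.Int32]))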
invertPermutation' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "InvertPermutation" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" description: "1-D." type_attr: "T" } output_arg { name: "y" description: "1-D." type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Returns which elements of x are finite. -- -- @compatibility(numpy) -- Equivalent to np.isfinite -- @end_compatibility isFinite :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build Bool -- ^ __y__ isFinite = isFinite' id isFinite' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build Bool -- ^ __y__ isFinite' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "IsFinite" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Returns which elements of x are Inf. -- -- @compatibility(numpy) -- Equivalent to np.isinf -- @end_compatibility isInf :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build Bool -- ^ __y__ isInf = isInf' id isInf' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build Bool -- ^ __y__ isInf' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "IsInf" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Returns which elements of x are NaN. -- -- @compatibility(numpy) -- Equivalent to np.isnan -- @end_compatibility isNan :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build Bool -- ^ __y__ isNan = isNan' id isNan' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build Bool -- ^ __y__ isNan' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "IsNan" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Checks whether a tensor has been initialized. -- -- Outputs boolean scalar indicating whether the tensor has been initialized. isVariableInitialized :: forall dtype m' . (MonadBuild m', TensorType dtype) => Tensor Ref dtype -- ^ __ref__: Should be from a `Variable` node. May be uninitialized. -> m' (Tensor Value Bool) -- ^ __is_initialized__ isVariableInitialized = isVariableInitialized' id isVariableInitialized' :: forall dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref dtype -- ^ __ref__: Should be from a `Variable` node. 
May be uninitialized. -> m' (Tensor Value Bool) -- ^ __is_initialized__ isVariableInitialized' op'options ref | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref] buildOp [] (opDef "IsVariableInitialized" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" description: "Should be from a `Variable` node. May be uninitialized." type_attr: "dtype" is_ref: true } output_arg { name: "is_initialized" type: DT_BOOL } attr { name: "dtype" type: "type" description: "The type of elements in the variable tensor." } -} -- | L2 Loss. -- -- Computes half the L2 norm of a tensor without the `sqrt`: -- -- output = sum(t ** 2) / 2 l2Loss :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __t__: Typically 2-D, but may have any dimensions. -> Tensor Build t -- ^ __output__: 0-D. l2Loss = l2Loss' id l2Loss' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __t__: Typically 2-D, but may have any dimensions. -> Tensor Build t -- ^ __output__: 0-D. l2Loss' op'options t | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs t] return (opDef "L2Loss" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "t" description: "Typically 2-D, but may have any dimensions." type_attr: "T" } output_arg { name: "output" description: "0-D." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } -} -- | Local Response Normalization. -- -- The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last -- dimension), and each vector is normalized independently. Within a given vector, -- each component is divided by the weighted, squared sum of inputs within -- `depth_radius`. In detail, -- -- sqr_sum[a, b, c, d] = -- sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2) -- output = input / (bias + alpha * sqr_sum) ** beta -- -- For details, see [Krizhevsky et al., ImageNet classification with deep -- convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks). lRN :: forall v'1 t . (OneOf '[Data.Word.Word16, Float] t) => Tensor v'1 t -- ^ __input__: 4-D. -> Tensor Build t -- ^ __output__ lRN = lRN' id lRN' :: forall v'1 t . (OneOf '[Data.Word.Word16, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: 4-D. -> Tensor Build t -- ^ __output__ lRN' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "LRN" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "4-D." type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "depth_radius" type: "int" default_value { i: 5 } description: "0-D. Half-width of the 1-D normalization window." 
} attr { name: "bias" type: "float" default_value { f: 1.0 } description: "An offset (usually positive to avoid dividing by 0)." } attr { name: "alpha" type: "float" default_value { f: 1.0 } description: "A scale factor, usually positive." } attr { name: "beta" type: "float" default_value { f: 0.5 } description: "An exponent." } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_HALF } } } -} -- | Gradients for Local Response Normalization. lRNGrad :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Float] t) => Tensor v'1 t -- ^ __input_grads__: 4-D with shape `[batch, height, width, channels]`. -> Tensor v'2 t -- ^ __input_image__: 4-D with shape `[batch, height, width, channels]`. -> Tensor v'3 t -- ^ __output_image__: 4-D with shape `[batch, height, width, channels]`. -> Tensor Build t -- ^ __output__: The gradients for LRN. lRNGrad = lRNGrad' id lRNGrad' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Float] t) => OpParams -> Tensor v'1 t -- ^ __input_grads__: 4-D with shape `[batch, height, width, channels]`. -> Tensor v'2 t -- ^ __input_image__: 4-D with shape `[batch, height, width, channels]`. -> Tensor v'3 t -- ^ __output_image__: 4-D with shape `[batch, height, width, channels]`. -> Tensor Build t -- ^ __output__: The gradients for LRN. lRNGrad' op'options input_grads input_image output_image | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_grads, buildInputs input_image, buildInputs output_image] return (opDef "LRNGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_grads" description: "4-D with shape `[batch, height, width, channels]`." type_attr: "T" } input_arg { name: "input_image" description: "4-D with shape `[batch, height, width, channels]`." type_attr: "T" } input_arg { name: "output_image" description: "4-D with shape `[batch, height, width, channels]`." type_attr: "T" } output_arg { name: "output" description: "The gradients for LRN." type_attr: "T" } attr { name: "depth_radius" type: "int" default_value { i: 5 } description: "A depth radius." } attr { name: "bias" type: "float" default_value { f: 1.0 } description: "An offset (usually > 0 to avoid dividing by 0)." } attr { name: "alpha" type: "float" default_value { f: 1.0 } description: "A scale factor, usually positive." } attr { name: "beta" type: "float" default_value { f: 0.5 } description: "An exponent." } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_HALF } } } -} -- | Generates labels for candidate sampling with a learned unigram distribution. -- -- See explanations of candidate sampling and the data formats at -- go/candidate-sampling. -- -- For each batch, this op picks a single set of sampled candidate labels. -- -- The advantages of sampling candidates per-batch are simplicity and the -- possibility of efficient dense matrix multiplication. The disadvantage is that -- the sampled candidates must be chosen independently of the context and of the -- true labels. learnedUnigramCandidateSampler :: Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample per batch. -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context. -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max). 
-> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled -- candidates in a batch are unique. This requires some approximation to -- estimate the post-rejection sampling probabilities. -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the -- IDs of the num_true target_classes in the corresponding original label. -> (Tensor Build Data.Int.Int64, Tensor Build Float, Tensor Build Float) -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__) -- -- * __sampled_candidates__: A vector of length num_sampled, in which each element is -- the ID of a sampled candidate. -- -- * __true_expected_count__: A batch_size * num_true matrix, representing -- the number of times each candidate is expected to occur in a batch -- of sampled candidates. If unique=true, then this is a probability. -- -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled -- candidate representing the number of times the candidate is expected -- to occur in a batch of sampled candidates. If unique=true, then this is a -- probability. learnedUnigramCandidateSampler = learnedUnigramCandidateSampler' id learnedUnigramCandidateSampler' :: OpParams -> Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample per batch. -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context. -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max). -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled -- candidates in a batch are unique. This requires some approximation to -- estimate the post-rejection sampling probabilities. -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the -- IDs of the num_true target_classes in the corresponding original label. -> (Tensor Build Data.Int.Int64, Tensor Build Float, Tensor Build Float) -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__) -- -- * __sampled_candidates__: A vector of length num_sampled, in which each element is -- the ID of a sampled candidate. -- -- * __true_expected_count__: A batch_size * num_true matrix, representing -- the number of times each candidate is expected to occur in a batch -- of sampled candidates. If unique=true, then this is a probability. -- -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled -- candidate representing the number of times the candidate is expected -- to occur in a batch of sampled candidates. If unique=true, then this is a -- probability. learnedUnigramCandidateSampler' op'options num_sampled num_true range_max unique true_classes | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs true_classes] return (opDef "LearnedUnigramCandidateSampler" & opAttr "num_sampled" .~ num_sampled & opAttr "num_true" .~ num_true & opAttr "range_max" .~ range_max & opAttr "unique" .~ unique & op'options & opInputs .~ op'inputs) {- input_arg { name: "true_classes" description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label." type: DT_INT64 } output_arg { name: "sampled_candidates" description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate." 
type: DT_INT64 } output_arg { name: "true_expected_count" description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability." type: DT_FLOAT } output_arg { name: "sampled_expected_count" description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability." type: DT_FLOAT } attr { name: "num_true" type: "int" description: "Number of true labels per context." has_minimum: true minimum: 1 } attr { name: "num_sampled" type: "int" description: "Number of candidates to randomly sample per batch." has_minimum: true minimum: 1 } attr { name: "unique" type: "bool" description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities." } attr { name: "range_max" type: "int" description: "The sampler will sample integers from the interval [0, range_max)." has_minimum: true minimum: 1 } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } -} -- | Returns the truth value of (x < y) element-wise. -- -- *NOTE*: `Less` supports broadcasting. More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) less :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ less = less' id less' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ less' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "Less" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } -} -- | Returns the truth value of (x <= y) element-wise. -- -- *NOTE*: `LessEqual` supports broadcasting. More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) lessEqual :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ lessEqual = lessEqual' id lessEqual' :: forall v'1 v'2 t .
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ lessEqual' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "LessEqual" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } -} -- | Computes the log of the absolute value of `Gamma(x)` element-wise. lgamma :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ lgamma = lgamma' id lgamma' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ lgamma' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Lgamma" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Generates values in an interval. -- -- A sequence of `num` evenly-spaced values is generated beginning at `start`. -- If `num > 1`, the values in the sequence increase by `(stop - start) / (num - 1)`, -- so that the last one is exactly `stop`. -- -- For example: -- -- ``` -- tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 12.0] -- ``` linSpace :: forall v'1 v'2 v'3 t tidx . (OneOf '[Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __start__: First entry in the range. -> Tensor v'2 t -- ^ __stop__: Last entry in the range. -> Tensor v'3 tidx -- ^ __num__: Number of values to generate. -> Tensor Build t -- ^ __output__: 1-D. The generated values. linSpace = linSpace' id linSpace' :: forall v'1 v'2 v'3 t tidx . (OneOf '[Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __start__: First entry in the range. -> Tensor v'2 t -- ^ __stop__: Last entry in the range. -> Tensor v'3 tidx -- ^ __num__: Number of values to generate. -> Tensor Build t -- ^ __output__: 1-D. The generated values. linSpace' op'options start stop num | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs start, buildInputs stop, buildInputs num] return (opDef "LinSpace" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "start" description: "First entry in the range." type_attr: "T" } input_arg { name: "stop" description: "Last entry in the range." type_attr: "T" } input_arg { name: "num" description: "Number of values to generate." type_attr: "Tidx" } output_arg { name: "output" description: "1-D. The generated values."
type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Computes the difference between two lists of numbers or strings. -- -- Given a list `x` and a list `y`, this operation returns a list `out` that -- represents all values that are in `x` but not in `y`. The returned list `out` -- is sorted in the same order that the numbers appear in `x` (duplicates are -- preserved). This operation also returns a list `idx` that represents the -- position of each `out` element in `x`. In other words: -- -- `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` -- -- For example, given this input: -- -- ```prettyprint -- x = [1, 2, 3, 4, 5, 6] -- y = [1, 3, 5] -- ``` -- -- This operation would return: -- -- ```prettyprint -- out ==> [2, 4, 6] -- idx ==> [1, 3, 5] -- ``` listDiff :: forall v'1 v'2 t out_idx . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_idx) => Tensor v'1 t -- ^ __x__: 1-D. Values to keep. -> Tensor v'2 t -- ^ __y__: 1-D. Values to remove. -> (Tensor Build t, Tensor Build out_idx) -- ^ (__out__, __idx__) -- -- * __out__: 1-D. Values present in `x` but not in `y`. -- -- * __idx__: 1-D. Positions of `x` values preserved in `out`. listDiff = listDiff' id listDiff' :: forall v'1 v'2 t out_idx . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_idx) => OpParams -> Tensor v'1 t -- ^ __x__: 1-D. Values to keep. -> Tensor v'2 t -- ^ __y__: 1-D. Values to remove. -> (Tensor Build t, Tensor Build out_idx) -- ^ (__out__, __idx__) -- -- * __out__: 1-D. Values present in `x` but not in `y`. -- -- * __idx__: 1-D. Positions of `x` values preserved in `out`. listDiff' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "ListDiff" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "out_idx" .~ tensorType (undefined :: out_idx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" description: "1-D. Values to keep." type_attr: "T" } input_arg { name: "y" description: "1-D. Values to remove." type_attr: "T" } output_arg { name: "out" description: "1-D. Values present in `x` but not in `y`." type_attr: "T" } output_arg { name: "idx" description: "1-D. Positions of `x` values preserved in `out`." type_attr: "out_idx" } attr { name: "T" type: "type" } attr { name: "out_idx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Computes natural logarithm of x element-wise. -- -- I.e., \\(y = \log_e x\\). log :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ log = log' id log' :: forall v'1 t . 
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ log' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Log" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Computes natural logarithm of (1 + x) element-wise. -- -- I.e., \\(y = \log_e (1 + x)\\). log1p :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ log1p = log1p' id log1p' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ log1p' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Log1p" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Computes log softmax activations. -- -- For each batch `i` and class `j` we have -- -- logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) logSoftmax :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __logits__: 2-D with shape `[batch_size, num_classes]`. -> Tensor Build t -- ^ __logsoftmax__: Same shape as `logits`. logSoftmax = logSoftmax' id logSoftmax' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __logits__: 2-D with shape `[batch_size, num_classes]`. -> Tensor Build t -- ^ __logsoftmax__: Same shape as `logits`. logSoftmax' op'options logits | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs logits] return (opDef "LogSoftmax" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "logits" description: "2-D with shape `[batch_size, num_classes]`." type_attr: "T" } output_arg { name: "logsoftmax" description: "Same shape as `logits`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Generates labels for candidate sampling with a log-uniform distribution. -- -- See explanations of candidate sampling and the data formats at -- go/candidate-sampling. -- -- For each batch, this op picks a single set of sampled candidate labels. -- -- The advantages of sampling candidates per-batch are simplicity and the -- possibility of efficient dense matrix multiplication. The disadvantage is that -- the sampled candidates must be chosen independently of the context and of the -- true labels. logUniformCandidateSampler :: Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample per batch. -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context. -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max). 
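-- The log-softmax identity above can be checked against a plain Haskell
-- transcription of the formula (illustrative helper only, not part of this
-- module):
--
-- > logSoftmaxRow :: [Double] -> [Double]
-- > logSoftmaxRow row = map (\l -> l - log (sum (map exp row))) row
-- >
-- > -- logSoftmaxRow [0, 0] == [-0.693..., -0.693...], i.e. log (1/2) twice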
-> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled -- candidates in a batch are unique. This requires some approximation to -- estimate the post-rejection sampling probabilities. -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the -- IDs of the num_true target_classes in the corresponding original label. -> (Tensor Build Data.Int.Int64, Tensor Build Float, Tensor Build Float) -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__) -- -- * __sampled_candidates__: A vector of length num_sampled, in which each element is -- the ID of a sampled candidate. -- -- * __true_expected_count__: A batch_size * num_true matrix, representing -- the number of times each candidate is expected to occur in a batch -- of sampled candidates. If unique=true, then this is a probability. -- -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled -- candidate representing the number of times the candidate is expected -- to occur in a batch of sampled candidates. If unique=true, then this is a -- probability. logUniformCandidateSampler = logUniformCandidateSampler' id logUniformCandidateSampler' :: OpParams -> Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample per batch. -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context. -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max). -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled -- candidates in a batch are unique. This requires some approximation to -- estimate the post-rejection sampling probabilities. -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the -- IDs of the num_true target_classes in the corresponding original label. -> (Tensor Build Data.Int.Int64, Tensor Build Float, Tensor Build Float) -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__) -- -- * __sampled_candidates__: A vector of length num_sampled, in which each element is -- the ID of a sampled candidate. -- -- * __true_expected_count__: A batch_size * num_true matrix, representing -- the number of times each candidate is expected to occur in a batch -- of sampled candidates. If unique=true, then this is a probability. -- -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled -- candidate representing the number of times the candidate is expected -- to occur in a batch of sampled candidates. If unique=true, then this is a -- probability. logUniformCandidateSampler' op'options num_sampled num_true range_max unique true_classes | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs true_classes] return (opDef "LogUniformCandidateSampler" & opAttr "num_sampled" .~ num_sampled & opAttr "num_true" .~ num_true & opAttr "range_max" .~ range_max & opAttr "unique" .~ unique & op'options & opInputs .~ op'inputs) {- input_arg { name: "true_classes" description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label." type: DT_INT64 } output_arg { name: "sampled_candidates" description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate." 
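-- The "seed" and "seed2" attrs listed in this op definition default to 0;
-- deterministic sampling could be requested through the primed variant's
-- `OpParams`, e.g. (sketch only; the argument names after the options are
-- placeholders):
--
-- > logUniformCandidateSampler' (opAttr "seed" .~ (7 :: Data.Int.Int64))
-- >     numSampled numTrue rangeMax unique trueClasses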
type: DT_INT64 } output_arg { name: "true_expected_count" description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability." type: DT_FLOAT } output_arg { name: "sampled_expected_count" description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability." type: DT_FLOAT } attr { name: "num_true" type: "int" description: "Number of true labels per context." has_minimum: true minimum: 1 } attr { name: "num_sampled" type: "int" description: "Number of candidates to randomly sample per batch." has_minimum: true minimum: 1 } attr { name: "unique" type: "bool" description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities." } attr { name: "range_max" type: "int" description: "The sampler will sample integers from the interval [0, range_max)." has_minimum: true minimum: 1 } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } -} -- | Returns the truth value of x AND y element-wise. -- -- *NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) logicalAnd :: Tensor v'1 Bool -- ^ __x__ -> Tensor v'2 Bool -- ^ __y__ -> Tensor Build Bool -- ^ __z__ logicalAnd = logicalAnd' id logicalAnd' :: OpParams -> Tensor v'1 Bool -- ^ __x__ -> Tensor v'2 Bool -- ^ __y__ -> Tensor Build Bool -- ^ __z__ logicalAnd' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "LogicalAnd" & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type: DT_BOOL } input_arg { name: "y" type: DT_BOOL } output_arg { name: "z" type: DT_BOOL } -} -- | Returns the truth value of NOT x element-wise. logicalNot :: Tensor v'1 Bool -- ^ __x__ -> Tensor Build Bool -- ^ __y__ logicalNot = logicalNot' id logicalNot' :: OpParams -> Tensor v'1 Bool -- ^ __x__ -> Tensor Build Bool -- ^ __y__ logicalNot' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "LogicalNot" & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type: DT_BOOL } output_arg { name: "y" type: DT_BOOL } -} -- | Returns the truth value of x OR y element-wise. -- -- *NOTE*: `LogicalOr` supports broadcasting.
More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) logicalOr :: Tensor v'1 Bool -- ^ __x__ -> Tensor v'2 Bool -- ^ __y__ -> Tensor Build Bool -- ^ __z__ logicalOr = logicalOr' id logicalOr' :: OpParams -> Tensor v'1 Bool -- ^ __x__ -> Tensor v'2 Bool -- ^ __y__ -> Tensor Build Bool -- ^ __z__ logicalOr' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "LogicalOr" & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type: DT_BOOL } input_arg { name: "y" type: DT_BOOL } output_arg { name: "z" type: DT_BOOL } -} -- | Outputs all keys and values in the table. lookupTableExport :: forall tkeys tvalues m' . (MonadBuild m', TensorType tkeys, TensorType tvalues) => Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table. -> m' ((Tensor Value tkeys, Tensor Value tvalues)) -- ^ (__keys__, __values__) -- -- * __keys__: Vector of all keys present in the table. -- -- * __values__: Tensor of all values in the table. Indexed in parallel with `keys`. lookupTableExport = lookupTableExport' id lookupTableExport' :: forall tkeys tvalues m' . (MonadBuild m', TensorType tkeys, TensorType tvalues) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table. -> m' ((Tensor Value tkeys, Tensor Value tvalues)) -- ^ (__keys__, __values__) -- -- * __keys__: Vector of all keys present in the table. -- -- * __values__: Tensor of all values in the table. Indexed in parallel with `keys`. lookupTableExport' op'options table_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle] buildOp [] (opDef "LookupTableExport" & opAttr "Tkeys" .~ tensorType (undefined :: tkeys) & opAttr "Tvalues" .~ tensorType (undefined :: tvalues) & op'options & opInputs .~ op'inputs) {- input_arg { name: "table_handle" description: "Handle to the table." type: DT_STRING is_ref: true } output_arg { name: "keys" description: "Vector of all keys present in the table." type_attr: "Tkeys" } output_arg { name: "values" description: "Tensor of all values in the table. Indexed in parallel with `keys`." type_attr: "Tvalues" } attr { name: "Tkeys" type: "type" } attr { name: "Tvalues" type: "type" } -} -- | Looks up keys in a table, outputs the corresponding values. -- -- The tensor `keys` must be of the same type as the keys of the table. -- The output `values` is of the type of the table values. -- -- The scalar `default_value` is the value output for keys not present in the -- table. It must also be of the same type as the table values. lookupTableFind :: forall v'2 v'3 tin tout m' . (MonadBuild m', TensorType tin, TensorType tout) => Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table. -> Tensor v'2 tin -- ^ __keys__: Any shape. Keys to look up. -> Tensor v'3 tout -- ^ __default_value__ -> m' (Tensor Value tout) -- ^ __values__: Same shape as `keys`. Values found in the table, or `default_values` -- for missing keys. lookupTableFind = lookupTableFind' id lookupTableFind' :: forall v'2 v'3 tin tout m' . (MonadBuild m', TensorType tin, TensorType tout) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table. -> Tensor v'2 tin -- ^ __keys__: Any shape. Keys to look up. -> Tensor v'3 tout -- ^ __default_value__ -> m' (Tensor Value tout) -- ^ __values__: Same shape as `keys`.
Values found in the table, or `default_values` -- for missing keys. lookupTableFind' op'options table_handle keys default_value | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle, buildInputs keys, buildInputs default_value] buildOp [] (opDef "LookupTableFind" & opAttr "Tin" .~ tensorType (undefined :: tin) & opAttr "Tout" .~ tensorType (undefined :: tout) & op'options & opInputs .~ op'inputs) {- input_arg { name: "table_handle" description: "Handle to the table." type: DT_STRING is_ref: true } input_arg { name: "keys" description: "Any shape. Keys to look up." type_attr: "Tin" } input_arg { name: "default_value" type_attr: "Tout" } output_arg { name: "values" description: "Same shape as `keys`. Values found in the table, or `default_values`\nfor missing keys." type_attr: "Tout" } attr { name: "Tin" type: "type" } attr { name: "Tout" type: "type" } -} -- | Replaces the contents of the table with the specified keys and values. -- -- The tensor `keys` must be of the same type as the keys of the table. -- The tensor `values` must be of the type of the table values. lookupTableImport :: forall v'2 v'3 tin tout m' . (MonadBuild m', TensorType tin, TensorType tout) => Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table. -> Tensor v'2 tin -- ^ __keys__: Any shape. Keys to look up. -> Tensor v'3 tout -- ^ __values__: Values to associate with keys. -> m' (ControlNode) lookupTableImport = lookupTableImport' id lookupTableImport' :: forall v'2 v'3 tin tout m' . (MonadBuild m', TensorType tin, TensorType tout) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table. -> Tensor v'2 tin -- ^ __keys__: Any shape. Keys to look up. -> Tensor v'3 tout -- ^ __values__: Values to associate with keys. -> m' (ControlNode) lookupTableImport' op'options table_handle keys values | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle, buildInputs keys, buildInputs values] buildOp [] (opDef "LookupTableImport" & opAttr "Tin" .~ tensorType (undefined :: tin) & opAttr "Tout" .~ tensorType (undefined :: tout) & op'options & opInputs .~ op'inputs) {- input_arg { name: "table_handle" description: "Handle to the table." type: DT_STRING is_ref: true } input_arg { name: "keys" description: "Any shape. Keys to look up." type_attr: "Tin" } input_arg { name: "values" description: "Values to associate with keys." type_attr: "Tout" } attr { name: "Tin" type: "type" } attr { name: "Tout" type: "type" } -} -- | Updates the table to associate keys with values. -- -- The tensor `keys` must be of the same type as the keys of the table. -- The tensor `values` must be of the type of the table values. lookupTableInsert :: forall v'2 v'3 tin tout m' . (MonadBuild m', TensorType tin, TensorType tout) => Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table. -> Tensor v'2 tin -- ^ __keys__: Any shape. Keys to look up. -> Tensor v'3 tout -- ^ __values__: Values to associate with keys. -> m' (ControlNode) lookupTableInsert = lookupTableInsert' id lookupTableInsert' :: forall v'2 v'3 tin tout m' . (MonadBuild m', TensorType tin, TensorType tout) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table. -> Tensor v'2 tin -- ^ __keys__: Any shape. Keys to look up. -> Tensor v'3 tout -- ^ __values__: Values to associate with keys.
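-- A minimal sketch of populating a table, assuming a table handle obtained
-- from one of the table-constructor ops elsewhere in this module, and
-- assuming the sibling TensorFlow.Core / TensorFlow.Ops modules re-export
-- the names used below (all names here are illustrative):
--
-- > import qualified Data.ByteString as BS
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.Ops as TF
-- >
-- > populate :: TF.Tensor TF.Ref BS.ByteString -> TF.Session TF.ControlNode
-- > populate table = lookupTableInsert table
-- >     (TF.constant (TF.Shape [2]) [1, 2 :: Data.Int.Int64])
-- >     (TF.constant (TF.Shape [2]) [10.5, 20.5 :: Float])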
-> m' (ControlNode) lookupTableInsert' op'options table_handle keys values | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle, buildInputs keys, buildInputs values] buildOp [] (opDef "LookupTableInsert" & opAttr "Tin" .~ tensorType (undefined :: tin) & opAttr "Tout" .~ tensorType (undefined :: tout) & op'options & opInputs .~ op'inputs) {- input_arg { name: "table_handle" description: "Handle to the table." type: DT_STRING is_ref: true } input_arg { name: "keys" description: "Any shape. Keys to look up." type_attr: "Tin" } input_arg { name: "values" description: "Values to associate with keys." type_attr: "Tout" } attr { name: "Tin" type: "type" } attr { name: "Tout" type: "type" } -} -- | Computes the number of elements in the given table. lookupTableSize :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table. -> m' (Tensor Value Data.Int.Int64) -- ^ __size__: Scalar that contains number of elements in the table. lookupTableSize = lookupTableSize' id lookupTableSize' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__: Handle to the table. -> m' (Tensor Value Data.Int.Int64) -- ^ __size__: Scalar that contains number of elements in the table. lookupTableSize' op'options table_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle] buildOp [] (opDef "LookupTableSize" & op'options & opInputs .~ op'inputs) {- input_arg { name: "table_handle" description: "Handle to the table." type: DT_STRING is_ref: true } output_arg { name: "size" description: "Scalar that contains number of elements in the table." type: DT_INT64 } -} -- | Forwards the input to the output. -- -- This operator represents the loop termination condition used by the -- "pivot" switches of a loop. loopCond :: Tensor v'1 Bool -- ^ __input__: A boolean scalar, representing the branch predicate of the Switch op. -> Tensor Build Bool -- ^ __output__: The same tensor as `input`. loopCond = loopCond' id loopCond' :: OpParams -> Tensor v'1 Bool -- ^ __input__: A boolean scalar, representing the branch predicate of the Switch op. -> Tensor Build Bool -- ^ __output__: The same tensor as `input`. loopCond' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "LoopCond" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "A boolean scalar, representing the branch predicate of the Switch op." type: DT_BOOL } output_arg { name: "output" description: "The same tensor as `input`." type: DT_BOOL } -} -- | Multiply the matrix "a" by the matrix "b". -- -- The inputs must be two-dimensional matrices and the inner dimension of -- "a" (after being transposed if transpose_a is true) must match the -- outer dimension of "b" (after being transposed if transpose_b is -- true). -- -- *Note*: The default kernel implementation for MatMul on GPUs uses -- cublas. matMul :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __b__ -> Tensor Build t -- ^ __product__ matMul = matMul' id matMul' :: forall v'1 v'2 t .
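-- For instance, the "transpose_a" attr documented in the op definition
-- below could be set through the primed variant (sketch only; `a` and `b`
-- are placeholder tensors):
--
-- > productT = matMul' (opAttr "transpose_a" .~ True) a b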
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __b__ -> Tensor Build t -- ^ __product__ matMul' op'options a b | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a, buildInputs b] return (opDef "MatMul" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a" type_attr: "T" } input_arg { name: "b" type_attr: "T" } output_arg { name: "product" type_attr: "T" } attr { name: "transpose_a" type: "bool" default_value { b: false } description: "If true, \"a\" is transposed before multiplication." } attr { name: "transpose_b" type: "bool" default_value { b: false } description: "If true, \"b\" is transposed before multiplication." } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Returns the set of files matching one or more glob patterns. -- -- Note that this routine only supports wildcard characters in the -- basename portion of the pattern, not in the directory portion. matchingFiles :: Tensor v'1 Data.ByteString.ByteString -- ^ __pattern__: Shell wildcard pattern(s). Scalar or vector of type string. -> Tensor Build Data.ByteString.ByteString -- ^ __filenames__: A vector of matching filenames. matchingFiles = matchingFiles' id matchingFiles' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __pattern__: Shell wildcard pattern(s). Scalar or vector of type string. -> Tensor Build Data.ByteString.ByteString -- ^ __filenames__: A vector of matching filenames. matchingFiles' op'options pattern | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs pattern] return (opDef "MatchingFiles" & op'options & opInputs .~ op'inputs) {- input_arg { name: "pattern" description: "Shell wildcard pattern(s). Scalar or vector of type string." type: DT_STRING } output_arg { name: "filenames" description: "A vector of matching filenames." type: DT_STRING } -} -- | Copy a tensor setting everything outside a central band in each innermost matrix -- -- to zero. -- -- The `band` part is computed as follows: -- Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a -- tensor with the same shape where -- -- `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`. -- -- The indicator function -- -- `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) && -- (num_upper < 0 || (n-m) <= num_upper)`. -- -- For example: -- -- ```prettyprint -- # if 'input' is [[ 0, 1, 2, 3] -- [-1, 0, 1, 2] -- [-2, -1, 0, 1] -- [-3, -2, -1, 0]], -- -- tf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3] -- [-1, 0, 1, 2] -- [ 0, -1, 0, 1] -- [ 0, 0, -1, 0]], -- -- tf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0] -- [-1, 0, 1, 0] -- [-2, -1, 0, 1] -- [ 0, -2, -1, 0]] -- ``` -- -- Useful special cases: -- -- ```prettyprint -- tf.matrix_band_part(input, 0, -1) ==> Upper triangular part. -- tf.matrix_band_part(input, -1, 0) ==> Lower triangular part. -- tf.matrix_band_part(input, 0, 0) ==> Diagonal. -- ``` matrixBandPart :: forall v'1 v'2 v'3 t . (TensorType t) => Tensor v'1 t -- ^ __input__: Rank `k` tensor. -> Tensor v'2 Data.Int.Int64 -- ^ __num_lower__: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire -- lower triangle. -> Tensor v'3 Data.Int.Int64 -- ^ __num_upper__: 0-D tensor.
Number of superdiagonals to keep. If negative, keep -- entire upper triangle. -> Tensor Build t -- ^ __band__: Rank `k` tensor of the same shape as input. The extracted banded tensor. matrixBandPart = matrixBandPart' id matrixBandPart' :: forall v'1 v'2 v'3 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__: Rank `k` tensor. -> Tensor v'2 Data.Int.Int64 -- ^ __num_lower__: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire -- lower triangle. -> Tensor v'3 Data.Int.Int64 -- ^ __num_upper__: 0-D tensor. Number of superdiagonals to keep. If negative, keep -- entire upper triangle. -> Tensor Build t -- ^ __band__: Rank `k` tensor of the same shape as input. The extracted banded tensor. matrixBandPart' op'options input num_lower num_upper | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs num_lower, buildInputs num_upper] return (opDef "MatrixBandPart" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "Rank `k` tensor." type_attr: "T" } input_arg { name: "num_lower" description: "0-D tensor. Number of subdiagonals to keep. If negative, keep entire\nlower triangle." type: DT_INT64 } input_arg { name: "num_upper" description: "0-D tensor. Number of superdiagonals to keep. If negative, keep\nentire upper triangle." type: DT_INT64 } output_arg { name: "band" description: "Rank `k` tensor of the same shape as input. The extracted banded tensor." type_attr: "T" } attr { name: "T" type: "type" } -} -- | Computes the determinant of one or more square matrices. -- -- The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions -- form square matrices. The output is a tensor containing the determinants -- for all input submatrices `[..., :, :]`. matrixDeterminant :: forall v'1 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __input__: Shape is `[..., M, M]`. -> Tensor Build t -- ^ __output__: Shape is `[...]`. matrixDeterminant = matrixDeterminant' id matrixDeterminant' :: forall v'1 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: Shape is `[..., M, M]`. -> Tensor Build t -- ^ __output__: Shape is `[...]`. matrixDeterminant' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "MatrixDeterminant" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "Shape is `[..., M, M]`." type_attr: "T" } output_arg { name: "output" description: "Shape is `[...]`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Returns a batched diagonal tensor with given batched diagonal values. -- -- Given a `diagonal`, this operation returns a tensor with the `diagonal` and -- everything else padded with zeros. The diagonal is computed as follows: -- -- Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a -- tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where: -- -- `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.
-- -- For example: -- -- ```prettyprint -- # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]] -- -- and diagonal.shape = (2, 4) -- -- tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0] -- [0, 2, 0, 0] -- [0, 0, 3, 0] -- [0, 0, 0, 4]], -- [[5, 0, 0, 0] -- [0, 6, 0, 0] -- [0, 0, 7, 0] -- [0, 0, 0, 8]]] -- -- which has shape (2, 4, 4) -- ``` matrixDiag :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __diagonal__: Rank `k`, where `k >= 1`. -> Tensor Build t -- ^ __output__: Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`. matrixDiag = matrixDiag' id matrixDiag' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __diagonal__: Rank `k`, where `k >= 1`. -> Tensor Build t -- ^ __output__: Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`. matrixDiag' op'options diagonal | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs diagonal] return (opDef "MatrixDiag" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "diagonal" description: "Rank `k`, where `k >= 1`." type_attr: "T" } output_arg { name: "output" description: "Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`." type_attr: "T" } attr { name: "T" type: "type" } -} -- | Returns the batched diagonal part of a batched tensor. -- -- This operation returns a tensor with the `diagonal` part -- of the batched `input`. The `diagonal` part is computed as follows: -- -- Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a -- tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where: -- -- `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`. -- -- The input must be at least a matrix. -- -- For example: -- -- ```prettyprint -- # 'input' is [[[1, 0, 0, 0] -- [0, 2, 0, 0] -- [0, 0, 3, 0] -- [0, 0, 0, 4]], -- [[5, 0, 0, 0] -- [0, 6, 0, 0] -- [0, 0, 7, 0] -- [0, 0, 0, 8]]] -- -- and input.shape = (2, 4, 4) -- -- tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]] -- -- which has shape (2, 4) -- ``` matrixDiagPart :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __input__: Rank `k` tensor where `k >= 2`. -> Tensor Build t -- ^ __diagonal__: The extracted diagonal(s) having shape -- `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`. matrixDiagPart = matrixDiagPart' id matrixDiagPart' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__: Rank `k` tensor where `k >= 2`. -> Tensor Build t -- ^ __diagonal__: The extracted diagonal(s) having shape -- `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`. matrixDiagPart' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "MatrixDiagPart" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "Rank `k` tensor where `k >= 2`." type_attr: "T" } output_arg { name: "diagonal" description: "The extracted diagonal(s) having shape\n`diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`." type_attr: "T" } attr { name: "T" type: "type" } -} -- | Computes the inverse of one or more square invertible matrices or their -- -- adjoints (conjugate transposes). -- -- The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions -- form square matrices. The output is a tensor of the same shape as the input -- containing the inverse for all input submatrices `[..., :, :]`. 
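-- For instance, inverting a single 2x2 diagonal matrix might look like
-- this (hypothetical session sketch assuming TensorFlow.Core and
-- TensorFlow.Ops, imported as in the `listDiff` sketch earlier; not part
-- of the generated documentation):
--
-- > invDemo :: IO (V.Vector Float)
-- > invDemo = TF.runSession $ TF.run $ Ops.matrixInverse $
-- >     TF.constant (TF.Shape [2, 2]) [2, 0, 0, 4 :: Float]
-- > -- expected: [0.5, 0.0, 0.0, 0.25]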
-- -- The op uses LU decomposition with partial pivoting to compute the inverses. -- -- If a matrix is not invertible there is no guarantee what the op does. It -- may detect the condition and raise an exception or it may simply return a -- garbage result. matrixInverse :: forall v'1 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __input__: Shape is `[..., M, M]`. -> Tensor Build t -- ^ __output__: Shape is `[..., M, M]`. -- -- @compatibility(numpy) -- Equivalent to np.linalg.inv -- @end_compatibility matrixInverse = matrixInverse' id matrixInverse' :: forall v'1 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: Shape is `[..., M, M]`. -> Tensor Build t -- ^ __output__: Shape is `[..., M, M]`. -- -- @compatibility(numpy) -- Equivalent to np.linalg.inv -- @end_compatibility matrixInverse' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "MatrixInverse" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "Shape is `[..., M, M]`." type_attr: "T" } output_arg { name: "output" description: "Shape is `[..., M, M]`.\n\n@compatibility(numpy)\nEquivalent to np.linalg.inv\n@end_compatibility" type_attr: "T" } attr { name: "adjoint" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } } } -} -- | Returns a batched matrix tensor with new batched diagonal values. -- -- Given `input` and `diagonal`, this operation returns a tensor with the -- same shape and values as `input`, except for the main diagonal of the -- innermost matrices. These will be overwritten by the values in `diagonal`. -- -- The output is computed as follows: -- -- Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has -- `k` dimensions `[I, J, K, ..., min(M, N)]`. Then the output is a -- tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where: -- -- * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`. -- * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`. matrixSetDiag :: forall v'1 v'2 t . (TensorType t) => Tensor v'1 t -- ^ __input__: Rank `k+1`, where `k >= 1`. -> Tensor v'2 t -- ^ __diagonal__: Rank `k`, where `k >= 1`. -> Tensor Build t -- ^ __output__: Rank `k+1`, with `output.shape = input.shape`. matrixSetDiag = matrixSetDiag' id matrixSetDiag' :: forall v'1 v'2 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__: Rank `k+1`, where `k >= 1`. -> Tensor v'2 t -- ^ __diagonal__: Rank `k`, where `k >= 1`. -> Tensor Build t -- ^ __output__: Rank `k+1`, with `output.shape = input.shape`. matrixSetDiag' op'options input diagonal | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs diagonal] return (opDef "MatrixSetDiag" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "Rank `k+1`, where `k >= 1`." type_attr: "T" } input_arg { name: "diagonal" description: "Rank `k`, where `k >= 1`." type_attr: "T" } output_arg { name: "output" description: "Rank `k+1`, with `output.shape = input.shape`." type_attr: "T" } attr { name: "T" type: "type" } -} -- | Solves systems of linear equations. -- -- `Matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions -- form square matrices. `Rhs` is a tensor of shape `[..., M, K]`. 
The `output` is -- a tensor of shape `[..., M, K]`. If `adjoint` is `False` then each output matrix -- satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. -- If `adjoint` is `True` then each output matrix satisfies -- `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`. matrixSolve :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Double, Float] t) => Tensor v'1 t -- ^ __matrix__: Shape is `[..., M, M]`. -> Tensor v'2 t -- ^ __rhs__: Shape is `[..., M, K]`. -> Tensor Build t -- ^ __output__: Shape is `[..., M, K]`. matrixSolve = matrixSolve' id matrixSolve' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __matrix__: Shape is `[..., M, M]`. -> Tensor v'2 t -- ^ __rhs__: Shape is `[..., M, K]`. -> Tensor Build t -- ^ __output__: Shape is `[..., M, K]`. matrixSolve' op'options matrix rhs | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs matrix, buildInputs rhs] return (opDef "MatrixSolve" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "matrix" description: "Shape is `[..., M, M]`." type_attr: "T" } input_arg { name: "rhs" description: "Shape is `[..., M, K]`." type_attr: "T" } output_arg { name: "output" description: "Shape is `[..., M, K]`." type_attr: "T" } attr { name: "adjoint" type: "bool" default_value { b: false } description: "Boolean indicating whether to solve with `matrix` or its (block-wise)\nadjoint." } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Solves one or more linear least-squares problems. -- -- `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions -- form matrices of size `[M, N]`. Rhs is a tensor of shape `[..., M, K]`. -- The output is a tensor of shape `[..., N, K]` where each output matrix solves -- each of the equations matrix[..., :, :] * output[..., :, :] = rhs[..., :, :] -- in the least squares sense. -- -- Below we use the following notation for each pair of matrix and right-hand -- sides in the batch: -- -- `matrix`=\\(A \in \Re^{m \times n}\\), -- `rhs`=\\(B \in \Re^{m \times k}\\), -- `output`=\\(X \in \Re^{n \times k}\\), -- `l2_regularizer`=\\(\lambda\\). -- -- If `fast` is `True`, then the solution is computed by solving the normal -- equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then -- \\(X = (A^T A + \lambda I)^{-1} A^T B\\), which solves the least-squares -- problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k} } ||A Z - B||_F^2 + -- \lambda ||Z||_F^2\\). If \\(m \lt n\\) then `output` is computed as -- \\(X = A^T (A A^T + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the -- minimum-norm solution to the under-determined linear system, i.e. -- \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k} } ||Z||_F^2 \\), subject to -- \\(A Z = B\\). Notice that the fast path is only numerically stable when -- \\(A\\) is numerically full rank and has a condition number -- \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\\) or \\(\lambda\\) is -- sufficiently large. -- -- If `fast` is `False` an algorithm based on the numerically robust complete -- orthogonal decomposition is used. This computes the minimum-norm -- least-squares solution, even when \\(A\\) is rank deficient. This path is -- typically 6-7 times slower than the fast path. If `fast` is `False` then -- `l2_regularizer` is ignored.
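-- A hypothetical call solving a single least-squares system against the
-- identity matrix (assumes TensorFlow.Core / TensorFlow.Ops imported as in
-- the `listDiff` sketch earlier; the "fast" attr is left at its default):
--
-- > lstsqDemo :: IO (V.Vector Float)
-- > lstsqDemo = TF.runSession $ TF.run $ matrixSolveLs
-- >     (TF.constant (TF.Shape [2, 2]) [1, 0, 0, 1 :: Float])
-- >     (TF.constant (TF.Shape [2, 1]) [3, 4 :: Float])
-- >     (TF.constant (TF.Shape []) [0 :: Double])
-- > -- expected: [3.0, 4.0]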
matrixSolveLs :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __matrix__: Shape is `[..., M, N]`. -> Tensor v'2 t -- ^ __rhs__: Shape is `[..., M, K]`. -> Tensor v'3 Double -- ^ __l2_regularizer__: Scalar tensor. -- -- @compatibility(numpy) -- Equivalent to np.linalg.lstsq -- @end_compatibility -> Tensor Build t -- ^ __output__: Shape is `[..., N, K]`. matrixSolveLs = matrixSolveLs' id matrixSolveLs' :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __matrix__: Shape is `[..., M, N]`. -> Tensor v'2 t -- ^ __rhs__: Shape is `[..., M, K]`. -> Tensor v'3 Double -- ^ __l2_regularizer__: Scalar tensor. -- -- @compatibility(numpy) -- Equivalent to np.linalg.lstsq -- @end_compatibility -> Tensor Build t -- ^ __output__: Shape is `[..., N, K]`. matrixSolveLs' op'options matrix rhs l2_regularizer | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs matrix, buildInputs rhs, buildInputs l2_regularizer] return (opDef "MatrixSolveLs" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "matrix" description: "Shape is `[..., M, N]`." type_attr: "T" } input_arg { name: "rhs" description: "Shape is `[..., M, K]`." type_attr: "T" } input_arg { name: "l2_regularizer" description: "Scalar tensor.\n\n@compatibility(numpy)\nEquivalent to np.linalg.lstsq\n@end_compatibility" type: DT_DOUBLE } output_arg { name: "output" description: "Shape is `[..., N, K]`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } } } attr { name: "fast" type: "bool" default_value { b: true } } -} -- | Solves systems of linear equations with upper or lower triangular matrices by -- -- backsubstitution. -- -- `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form -- square matrices. If `lower` is `True` then the strictly upper triangular part -- of each inner-most matrix is assumed to be zero and not accessed. -- If `lower` is `False` then the strictly lower triangular part of each inner-most -- matrix is assumed to be zero and not accessed. -- `rhs` is a tensor of shape `[..., M, K]`. -- -- The output is a tensor of shape `[..., M, K]`. If `adjoint` is -- `False` then the innermost matrices in `output` satisfy matrix equations -- `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. -- If `adjoint` is `True` then the innermost matrices in -- `output` satisfy matrix equations -- `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`. matrixTriangularSolve :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __matrix__: Shape is `[..., M, M]`. -> Tensor v'2 t -- ^ __rhs__: Shape is `[..., M, K]`. -> Tensor Build t -- ^ __output__: Shape is `[..., M, K]`. matrixTriangularSolve = matrixTriangularSolve' id matrixTriangularSolve' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __matrix__: Shape is `[..., M, M]`. -> Tensor v'2 t -- ^ __rhs__: Shape is `[..., M, K]`. -> Tensor Build t -- ^ __output__: Shape is `[..., M, K]`. matrixTriangularSolve' op'options matrix rhs | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs matrix, buildInputs rhs] return (opDef "MatrixTriangularSolve" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "matrix" description: "Shape is `[..., M, M]`."
type_attr: "T" } input_arg { name: "rhs" description: "Shape is `[..., M, K]`." type_attr: "T" } output_arg { name: "output" description: "Shape is `[..., M, K]`." type_attr: "T" } attr { name: "lower" type: "bool" default_value { b: true } description: "Boolean indicating whether the innermost matrices in `matrix` are\nlower or upper triangular." } attr { name: "adjoint" type: "bool" default_value { b: false } description: "Boolean indicating whether to solve with `matrix` or its (block-wise)\n adjoint.\n\n@compatibility(numpy)\nEquivalent to np.linalg.triangular_solve\n@end_compatibility" } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } } } -} -- | Computes the maximum of elements across dimensions of a tensor. -- -- Reduces `input` along the dimensions given in `reduction_indices`. Unless -- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in -- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are -- retained with length 1. max :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __input__: The tensor to reduce. -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce. -> Tensor Build t -- ^ __output__: The reduced tensor. max = max' id max' :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __input__: The tensor to reduce. -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce. -> Tensor Build t -- ^ __output__: The reduced tensor. max' op'options input reduction_indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs reduction_indices] return (opDef "Max" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "The tensor to reduce." type_attr: "T" } input_arg { name: "reduction_indices" description: "The dimensions to reduce." type_attr: "Tidx" } output_arg { name: "output" description: "The reduced tensor." type_attr: "T" } attr { name: "keep_dims" type: "bool" default_value { b: false } description: "If true, retain reduced dimensions with length 1." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Performs max pooling on the input. maxPool :: forall v'1 t . (OneOf '[Data.Word.Word16, Float] t) => Tensor v'1 t -- ^ __input__: 4-D input to pool over. -> Tensor Build t -- ^ __output__: The max pooled output tensor. maxPool = maxPool' id maxPool' :: forall v'1 t . (OneOf '[Data.Word.Word16, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: 4-D input to pool over. -> Tensor Build t -- ^ __output__: The max pooled output tensor. 
maxPool' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "MaxPool" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "4-D input to pool over." type_attr: "T" } output_arg { name: "output" description: "The max pooled output tensor." type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_HALF } } } attr { name: "ksize" type: "list(int)" description: "The size of the window for each dimension of the input tensor." has_minimum: true minimum: 4 } attr { name: "strides" type: "list(int)" description: "The stride of the sliding window for each dimension of the\ninput tensor." has_minimum: true minimum: 4 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]." allowed_values { list { s: "NHWC" s: "NCHW" } } } -} -- | Performs 3D max pooling on the input. maxPool3D :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __input__: Shape `[batch, depth, rows, cols, channels]` tensor to pool over. -> Tensor Build t -- ^ __output__: The max pooled output tensor. maxPool3D = maxPool3D' id maxPool3D' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: Shape `[batch, depth, rows, cols, channels]` tensor to pool over. -> Tensor Build t -- ^ __output__: The max pooled output tensor. maxPool3D' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "MaxPool3D" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "Shape `[batch, depth, rows, cols, channels]` tensor to pool over." type_attr: "T" } output_arg { name: "output" description: "The max pooled output tensor." type_attr: "T" } attr { name: "ksize" type: "list(int)" description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`." has_minimum: true minimum: 5 } attr { name: "strides" type: "list(int)" description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`." has_minimum: true minimum: 5 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." 
allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } -} -- | Computes gradients of max pooling function. maxPool3DGrad :: forall v'1 v'2 v'3 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 Float -- ^ __orig_input__: The original input tensor. -> Tensor v'2 Float -- ^ __orig_output__: The original output tensor. -> Tensor v'3 t -- ^ __grad__: Output backprop of shape `[batch, depth, rows, cols, channels]`. -> Tensor Build t -- ^ __output__ maxPool3DGrad = maxPool3DGrad' id maxPool3DGrad' :: forall v'1 v'2 v'3 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Float -- ^ __orig_input__: The original input tensor. -> Tensor v'2 Float -- ^ __orig_output__: The original output tensor. -> Tensor v'3 t -- ^ __grad__: Output backprop of shape `[batch, depth, rows, cols, channels]`. -> Tensor Build t -- ^ __output__ maxPool3DGrad' op'options orig_input orig_output grad | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs orig_input, buildInputs orig_output, buildInputs grad] return (opDef "MaxPool3DGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "orig_input" description: "The original input tensor." type: DT_FLOAT } input_arg { name: "orig_output" description: "The original output tensor." type: DT_FLOAT } input_arg { name: "grad" description: "Output backprop of shape `[batch, depth, rows, cols, channels]`." type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "ksize" type: "list(int)" description: "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`." has_minimum: true minimum: 5 } attr { name: "strides" type: "list(int)" description: "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`." has_minimum: true minimum: 5 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } -} -- | Computes gradients of the maxpooling function. maxPoolGrad :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Float] t) => Tensor v'1 t -- ^ __orig_input__: The original input tensor. -> Tensor v'2 t -- ^ __orig_output__: The original output tensor. -> Tensor v'3 t -- ^ __grad__: 4-D. Gradients w.r.t. the output of `max_pool`. -> Tensor Build t -- ^ __output__: Gradients w.r.t. the input to `max_pool`. maxPoolGrad = maxPoolGrad' id maxPoolGrad' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Float] t) => OpParams -> Tensor v'1 t -- ^ __orig_input__: The original input tensor. 
-> Tensor v'2 t -- ^ __orig_output__: The original output tensor. -> Tensor v'3 t -- ^ __grad__: 4-D. Gradients w.r.t. the output of `max_pool`. -> Tensor Build t -- ^ __output__: Gradients w.r.t. the input to `max_pool`. maxPoolGrad' op'options orig_input orig_output grad | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs orig_input, buildInputs orig_output, buildInputs grad] return (opDef "MaxPoolGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "orig_input" description: "The original input tensor." type_attr: "T" } input_arg { name: "orig_output" description: "The original output tensor." type_attr: "T" } input_arg { name: "grad" description: "4-D. Gradients w.r.t. the output of `max_pool`." type_attr: "T" } output_arg { name: "output" description: "Gradients w.r.t. the input to `max_pool`." type_attr: "T" } attr { name: "ksize" type: "list(int)" description: "The size of the window for each dimension of the input tensor." has_minimum: true minimum: 4 } attr { name: "strides" type: "list(int)" description: "The stride of the sliding window for each dimension of the\ninput tensor." has_minimum: true minimum: 4 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } description: "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]." allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_HALF } } } -} -- | Computes gradients of the maxpooling function. maxPoolGradWithArgmax :: forall v'1 v'2 v'3 targmax t . (OneOf '[Data.Int.Int32, Data.Int.Int64] targmax, OneOf '[Data.Word.Word16, Float] t) => Tensor v'1 t -- ^ __input__: The original input. -> Tensor v'2 t -- ^ __grad__: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the -- output of `max_pool`. -> Tensor v'3 targmax -- ^ __argmax__: The indices of the maximum values chosen for each output of `max_pool`. -> Tensor Build t -- ^ __output__: Gradients w.r.t. the input of `max_pool`. maxPoolGradWithArgmax = maxPoolGradWithArgmax' id maxPoolGradWithArgmax' :: forall v'1 v'2 v'3 targmax t . (OneOf '[Data.Int.Int32, Data.Int.Int64] targmax, OneOf '[Data.Word.Word16, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: The original input. -> Tensor v'2 t -- ^ __grad__: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the -- output of `max_pool`. -> Tensor v'3 targmax -- ^ __argmax__: The indices of the maximum values chosen for each output of `max_pool`. -> Tensor Build t -- ^ __output__: Gradients w.r.t. the input of `max_pool`. maxPoolGradWithArgmax' op'options input grad argmax | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs grad, buildInputs argmax] return (opDef "MaxPoolGradWithArgmax" & opAttr "Targmax" .~ tensorType (undefined :: targmax) & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "The original input." 
type_attr: "T" } input_arg { name: "grad" description: "4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the\noutput of `max_pool`." type_attr: "T" } input_arg { name: "argmax" description: "The indices of the maximum values chosen for each output of `max_pool`." type_attr: "Targmax" } output_arg { name: "output" description: "Gradients w.r.t. the input of `max_pool`." type_attr: "T" } attr { name: "ksize" type: "list(int)" description: "The size of the window for each dimension of the input tensor." has_minimum: true minimum: 4 } attr { name: "strides" type: "list(int)" description: "The stride of the sliding window for each dimension of the\ninput tensor." has_minimum: true minimum: 4 } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "Targmax" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_HALF } } } -} -- | Performs max pooling on the input and outputs both max values and indices. -- -- The indices in `argmax` are flattened, so that a maximum value at position -- `[b, y, x, c]` becomes flattened index -- `((b * height + y) * width + x) * channels + c`. maxPoolWithArgmax :: forall v'1 targmax t . (OneOf '[Data.Int.Int32, Data.Int.Int64] targmax, OneOf '[Data.Word.Word16, Float] t) => Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, height, width, channels]`. Input to pool over. -> (Tensor Build t, Tensor Build targmax) -- ^ (__output__, __argmax__) -- -- * __output__: The max pooled output tensor. -- -- * __argmax__: 4-D. The flattened indices of the max values chosen for each output. maxPoolWithArgmax = maxPoolWithArgmax' id maxPoolWithArgmax' :: forall v'1 targmax t . (OneOf '[Data.Int.Int32, Data.Int.Int64] targmax, OneOf '[Data.Word.Word16, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, height, width, channels]`. Input to pool over. -> (Tensor Build t, Tensor Build targmax) -- ^ (__output__, __argmax__) -- -- * __output__: The max pooled output tensor. -- -- * __argmax__: 4-D. The flattened indices of the max values chosen for each output. maxPoolWithArgmax' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "MaxPoolWithArgmax" & opAttr "Targmax" .~ tensorType (undefined :: targmax) & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "4-D with shape `[batch, height, width, channels]`. Input to pool over." type_attr: "T" } output_arg { name: "output" description: "The max pooled output tensor." type_attr: "T" } output_arg { name: "argmax" description: "4-D. The flattened indices of the max values chosen for each output." type_attr: "Targmax" } attr { name: "ksize" type: "list(int)" description: "The size of the window for each dimension of the input tensor." has_minimum: true minimum: 4 } attr { name: "strides" type: "list(int)" description: "The stride of the sliding window for each dimension of the\ninput tensor." has_minimum: true minimum: 4 } attr { name: "Targmax" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." 
allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_HALF } } } -} -- | Returns the max of x and y (i.e. x > y ? x : y) element-wise. -- -- *NOTE*: `Maximum` supports broadcasting. More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) maximum :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ maximum = maximum' id maximum' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ maximum' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "Maximum" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } -} -- | Computes the mean of elements across dimensions of a tensor. -- -- Reduces `input` along the dimensions given in `reduction_indices`. Unless -- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in -- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are -- retained with length 1. mean :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __input__: The tensor to reduce. -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce. -> Tensor Build t -- ^ __output__: The reduced tensor. mean = mean' id mean' :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __input__: The tensor to reduce. -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce. -> Tensor Build t -- ^ __output__: The reduced tensor. mean' op'options input reduction_indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs reduction_indices] return (opDef "Mean" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "The tensor to reduce." type_attr: "T" } input_arg { name: "reduction_indices" description: "The dimensions to reduce." type_attr: "Tidx" } output_arg { name: "output" description: "The reduced tensor." type_attr: "T" } attr { name: "keep_dims" type: "bool" default_value { b: false } description: "If true, retain reduced dimensions with length 1." 
} attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Forwards the value of an available tensor from `inputs` to `output`. -- -- `Merge` waits for at least one of the tensors in `inputs` to become available. -- It is usually combined with `Switch` to implement branching. -- -- `Merge` forwards the first tensor for become available to `output`, and sets -- `value_index` to its index in `inputs`. merge :: forall v'1 t . (TensorType t) => [Tensor v'1 t] -- ^ __inputs__: The input tensors, exactly one of which will become available. -> (Tensor Build t, Tensor Build Data.Int.Int32) -- ^ (__output__, __value_index__) -- -- * __output__: Will be set to the available input tensor. -- -- * __value_index__: The index of the chosen input tensor in `inputs`. merge = merge' id merge' :: forall v'1 t . (TensorType t) => OpParams -> [Tensor v'1 t] -- ^ __inputs__: The input tensors, exactly one of which will become available. -> (Tensor Build t, Tensor Build Data.Int.Int32) -- ^ (__output__, __value_index__) -- -- * __output__: Will be set to the available input tensor. -- -- * __value_index__: The index of the chosen input tensor in `inputs`. merge' op'options inputs | eqLengthGuard [("N", [("inputs", length inputs)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs] return (opDef "Merge" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length inputs) :: Int64 {- input_arg { name: "inputs" description: "The input tensors, exactly one of which will become available." type_attr: "T" number_attr: "N" } output_arg { name: "output" description: "Will be set to the available input tensor." type_attr: "T" } output_arg { name: "value_index" description: "The index of the chosen input tensor in `inputs`." type: DT_INT32 } attr { name: "T" type: "type" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } -} -- | Merges summaries. -- -- This op creates a -- [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) -- protocol buffer that contains the union of all the values in the input -- summaries. -- -- When the Op is run, it reports an `InvalidArgument` error if multiple values -- in the summaries to merge use the same tag. mergeSummary :: [Tensor v'1 Data.ByteString.ByteString] -- ^ __inputs__: Can be of any shape. Each must contain serialized `Summary` protocol -- buffers. -> Tensor Build Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer. mergeSummary = mergeSummary' id mergeSummary' :: OpParams -> [Tensor v'1 Data.ByteString.ByteString] -- ^ __inputs__: Can be of any shape. Each must contain serialized `Summary` protocol -- buffers. -> Tensor Build Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer. 
mergeSummary' op'options inputs | eqLengthGuard [("N", [("inputs", length inputs)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs] return (opDef "MergeSummary" & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length inputs) :: Int64 {- input_arg { name: "inputs" description: "Can be of any shape. Each must contain serialized `Summary` protocol\nbuffers." type: DT_STRING number_attr: "N" } output_arg { name: "summary" description: "Scalar. Serialized `Summary` protocol buffer." type: DT_STRING } attr { name: "N" type: "int" has_minimum: true minimum: 1 } -} -- | V2 format specific: merges the metadata files of sharded checkpoints. The -- -- result is one logical checkpoint, with one physical metadata file and renamed -- data files. -- -- Intended for "grouping" multiple checkpoints in a sharded checkpoint setup. -- -- If delete_old_dirs is true, attempts to delete recursively the dirname of each -- path in the input checkpoint_prefixes. This is useful when those paths are non -- user-facing temporary locations. mergeV2Checkpoints :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __checkpoint_prefixes__: prefixes of V2 checkpoints to merge. -> Tensor v'2 Data.ByteString.ByteString -- ^ __destination_prefix__: scalar. The desired final prefix. Allowed to be the same -- as one of the checkpoint_prefixes. -> m' (ControlNode) mergeV2Checkpoints = mergeV2Checkpoints' id mergeV2Checkpoints' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __checkpoint_prefixes__: prefixes of V2 checkpoints to merge. -> Tensor v'2 Data.ByteString.ByteString -- ^ __destination_prefix__: scalar. The desired final prefix. Allowed to be the same -- as one of the checkpoint_prefixes. -> m' (ControlNode) mergeV2Checkpoints' op'options checkpoint_prefixes destination_prefix | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs checkpoint_prefixes, buildInputs destination_prefix] buildOp [] (opDef "MergeV2Checkpoints" & op'options & opInputs .~ op'inputs) {- input_arg { name: "checkpoint_prefixes" description: "prefixes of V2 checkpoints to merge." type: DT_STRING } input_arg { name: "destination_prefix" description: "scalar. The desired final prefix. Allowed to be the same\nas one of the checkpoint_prefixes." type: DT_STRING } attr { name: "delete_old_dirs" type: "bool" default_value { b: true } description: "see above." } -} -- | Computes the minimum of elements across dimensions of a tensor. -- -- Reduces `input` along the dimensions given in `reduction_indices`. Unless -- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in -- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are -- retained with length 1. min :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __input__: The tensor to reduce. -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce. -> Tensor Build t -- ^ __output__: The reduced tensor. min = min' id min' :: forall v'1 v'2 t tidx . 
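-- A short reduction sketch for `min`, assuming a qualified import of this
-- module as `Ops` and `constant` from `TensorFlow.Ops` (tensorflow-ops):
--
-- > -- Column-wise minima: reduce [[1, 2], [3, 4]] over axis 0 ==> [1, 2]
-- > colMins :: Tensor Build Float
-- > colMins = Ops.min (constant (Shape [2, 2]) [1, 2, 3, 4])
-- >                   (constant (Shape [1]) [0 :: Data.Int.Int32])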
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __input__: The tensor to reduce. -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce. -> Tensor Build t -- ^ __output__: The reduced tensor. min' op'options input reduction_indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs reduction_indices] return (opDef "Min" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "The tensor to reduce." type_attr: "T" } input_arg { name: "reduction_indices" description: "The dimensions to reduce." type_attr: "Tidx" } output_arg { name: "output" description: "The reduced tensor." type_attr: "T" } attr { name: "keep_dims" type: "bool" default_value { b: false } description: "If true, retain reduced dimensions with length 1." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Returns the min of x and y (i.e. x < y ? x : y) element-wise. -- -- *NOTE*: `Minimum` supports broadcasting. More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) minimum :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ minimum = minimum' id minimum' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ minimum' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "Minimum" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } -} -- | Pads a tensor with mirrored values. -- -- This operation pads `input` with mirrored values according to the `paddings` -- you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is -- the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates -- how many values to add before the contents of `input` in that dimension, and -- `paddings[D, 1]` indicates how many values to add after the contents of `input` -- in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater -- than `input.dim_size(D)` if `copy_border` is true, and no greater than -- `input.dim_size(D) - 1` if it is false. -- -- The padded size of each dimension D of the output is: -- -- `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` -- -- For example: -- -- ```prettyprint -- # 't' is [[1, 2, 3], [4, 5, 6]].
-- # 'paddings' is [[1, 1], [2, 2]]. -- # 'mode' is SYMMETRIC. -- # rank of 't' is 2. -- pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2] -- [2, 1, 1, 2, 3, 3, 2] -- [5, 4, 4, 5, 6, 6, 5] -- [5, 4, 4, 5, 6, 6, 5]] -- ``` mirrorPad :: forall v'1 v'2 t tpaddings . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tpaddings) => Tensor v'1 t -- ^ __input__: The input tensor to be padded. -> Tensor v'2 tpaddings -- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of -- rows must be the same as the rank of `input`. -> Tensor Build t -- ^ __output__: The padded tensor. mirrorPad = mirrorPad' id mirrorPad' :: forall v'1 v'2 t tpaddings . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tpaddings) => OpParams -> Tensor v'1 t -- ^ __input__: The input tensor to be padded. -> Tensor v'2 tpaddings -- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of -- rows must be the same as the rank of `input`. -> Tensor Build t -- ^ __output__: The padded tensor. mirrorPad' op'options input paddings | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs paddings] return (opDef "MirrorPad" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "The input tensor to be padded." type_attr: "T" } input_arg { name: "paddings" description: "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`." type_attr: "Tpaddings" } output_arg { name: "output" description: "The padded tensor." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tpaddings" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "mode" type: "string" description: "Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions\ndo not include the borders, while in symmetric mode the padded regions\ndo include the borders. For example, if `input` is `[1, 2, 3]` and `paddings`\nis `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and\nit is `[1, 2, 3, 3, 2]` in symmetric mode." allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } } } -} -- | Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor. -- -- This operation folds the padded areas of `input` by `MirrorPad` according to the -- `paddings` you specify. `paddings` must be the same as the `paddings` argument -- given to the corresponding `MirrorPad` op. -- -- The folded size of each dimension D of the output is: -- -- `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)` -- -- For example: -- -- ```prettyprint -- # 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]]. -- # 'paddings' is [[0, 1], [0, 1]]. -- # 'mode' is SYMMETRIC. -- # rank of 't' is 2. -- pad(t, paddings) ==> [[ 1, 5] -- [11, 28]] -- ``` mirrorPadGrad :: forall v'1 v'2 t tpaddings . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tpaddings) => Tensor v'1 t -- ^ __input__: The input tensor to be folded. -> Tensor v'2 tpaddings -- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of -- rows must be the same as the rank of `input`. -> Tensor Build t -- ^ __output__: The folded tensor. mirrorPadGrad = mirrorPadGrad' id mirrorPadGrad' :: forall v'1 v'2 t tpaddings . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tpaddings) => OpParams -> Tensor v'1 t -- ^ __input__: The input tensor to be folded.
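-- `mode` is a required attribute of `MirrorPad` (it has no default value in
-- the op definition above), so it must be supplied through the `OpParams`
-- argument of `mirrorPad'`. A sketch, assuming a qualified import of this
-- module as `Ops` and `constant` from `TensorFlow.Ops` (tensorflow-ops):
--
-- > -- REFLECT-pad [1, 2, 3] with paddings [[0, 2]] ==> [1, 2, 3, 2, 1]
-- > reflected :: Tensor Build Float
-- > reflected = Ops.mirrorPad'
-- >     (opAttr "mode" .~ ("REFLECT" :: Data.ByteString.ByteString))
-- >     (constant (Shape [3]) [1, 2, 3])
-- >     (constant (Shape [1, 2]) [0, 2 :: Data.Int.Int32])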
-> Tensor v'2 tpaddings -- ^ __paddings__: A two-column matrix specifying the padding sizes. The number of -- rows must be the same as the rank of `input`. -> Tensor Build t -- ^ __output__: The folded tensor. mirrorPadGrad' op'options input paddings | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs paddings] return (opDef "MirrorPadGrad" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "The input tensor to be folded." type_attr: "T" } input_arg { name: "paddings" description: "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`." type_attr: "Tpaddings" } output_arg { name: "output" description: "The folded tensor." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tpaddings" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "mode" type: "string" description: "The mode used in the `MirrorPad` op." allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } } } -} -- | Returns element-wise remainder of division. -- -- *NOTE*: `Mod` supports broadcasting. More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) mod :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ mod = mod' id mod' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ mod' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "Mod" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Returns x * y element-wise. -- -- *NOTE*: `Mul` supports broadcasting. More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) mul :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ mul = mul' id mul' :: forall v'1 v'2 t . 
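-- A broadcasting sketch for `mul`, under the same assumptions as the sketches
-- above (`Ops` qualified import, `constant` from `TensorFlow.Ops`):
--
-- > -- Broadcast a scalar factor: [1, 2, 3] * 2 ==> [2, 4, 6]
-- > doubled :: Tensor Build Float
-- > doubled = Ops.mul (constant (Shape [3]) [1, 2, 3])
-- >                   (constant (Shape []) [2])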
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ mul' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "Mul" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_UINT16 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Draws samples from a multinomial distribution. multinomial :: forall v'1 v'2 t m' . (MonadBuild m', OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __logits__: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` -- represents the unnormalized log probabilities for all classes. -> Tensor v'2 Data.Int.Int32 -- ^ __num_samples__: 0-D. Number of independent samples to draw for each row slice. -> m' (Tensor Value Data.Int.Int64) -- ^ __output__: 2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]` -- contains the drawn class labels with range `[0, num_classes)`. multinomial = multinomial' id multinomial' :: forall v'1 v'2 t m' . (MonadBuild m', OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __logits__: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` -- represents the unnormalized log probabilities for all classes. -> Tensor v'2 Data.Int.Int32 -- ^ __num_samples__: 0-D. Number of independent samples to draw for each row slice. -> m' (Tensor Value Data.Int.Int64) -- ^ __output__: 2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]` -- contains the drawn class labels with range `[0, num_classes)`. multinomial' op'options logits num_samples | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs logits, buildInputs num_samples] buildOp [] (opDef "Multinomial" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "logits" description: "2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]`\nrepresents the unnormalized log probabilities for all classes." type_attr: "T" } input_arg { name: "num_samples" description: "0-D. Number of independent samples to draw for each row slice." type: DT_INT32 } output_arg { name: "output" description: "2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]`\ncontains the drawn class labels with range `[0, num_classes)`." type: DT_INT64 } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either seed or seed2 is set to be non-zero, the internal random number\ngenerator is seeded by the given seed. Otherwise, a random seed is used." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." 
} attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } -} -- | Creates an empty hash table that uses tensors as the backing store. It uses -- -- "open addressing" with quadratic reprobing to resolve collisions. -- -- This op creates a mutable hash table, specifying the type of its keys and -- values. Each value must be a scalar. Data can be inserted into the table using -- the insert operations. It does not support the initialization operation. mutableDenseHashTable :: forall v'1 key_dtype m' . (MonadBuild m', TensorType key_dtype) => DataType -- ^ __value_dtype__: Type of the table values. -> Tensor v'1 key_dtype -- ^ __empty_key__: The key used to represent empty key buckets internally. Must not -- be used in insert or lookup operations. -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __table_handle__: Handle to a table. mutableDenseHashTable = mutableDenseHashTable' id mutableDenseHashTable' :: forall v'1 key_dtype m' . (MonadBuild m', TensorType key_dtype) => OpParams -> DataType -- ^ __value_dtype__: Type of the table values. -> Tensor v'1 key_dtype -- ^ __empty_key__: The key used to represent empty key buckets internally. Must not -- be used in insert or lookup operations. -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __table_handle__: Handle to a table. mutableDenseHashTable' op'options value_dtype empty_key | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs empty_key] buildOp [] (opDef "MutableDenseHashTable" & opAttr "key_dtype" .~ tensorType (undefined :: key_dtype) & opAttr "value_dtype" .~ value_dtype & op'options & opInputs .~ op'inputs) {- input_arg { name: "empty_key" description: "The key used to represent empty key buckets internally. Must not\nbe used in insert or lookup operations." type_attr: "key_dtype" } output_arg { name: "table_handle" description: "Handle to a table." type: DT_STRING is_ref: true } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this table is shared under the given name across\nmultiple sessions." } attr { name: "use_node_name_sharing" type: "bool" default_value { b: false } } attr { name: "key_dtype" type: "type" description: "Type of the table keys." } attr { name: "value_dtype" type: "type" description: "Type of the table values." } attr { name: "value_shape" type: "shape" default_value { shape { } } description: "The shape of each value." } attr { name: "initial_num_buckets" type: "int" default_value { i: 131072 } description: "The initial number of hash table buckets. Must be a power\nto 2." } attr { name: "max_load_factor" type: "float" default_value { f: 0.8 } description: "The maximum ratio between number of entries and number of\nbuckets before growing the table. Must be between 0 and 1." } -} -- | Creates an empty hash table. -- -- This op creates a mutable hash table, specifying the type of its keys and -- values. Each value must be a scalar. Data can be inserted into the table using -- the insert operations. It does not support the initialization operation. mutableHashTable :: forall m' . (MonadBuild m') => DataType -- ^ __key_dtype__: Type of the table keys. -> DataType -- ^ __value_dtype__: Type of the table values. 
-> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __table_handle__: Handle to a table. mutableHashTable = mutableHashTable' id mutableHashTable' :: forall m' . (MonadBuild m') => OpParams -> DataType -- ^ __key_dtype__: Type of the table keys. -> DataType -- ^ __value_dtype__: Type of the table values. -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __table_handle__: Handle to a table. mutableHashTable' op'options key_dtype value_dtype | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "MutableHashTable" & opAttr "key_dtype" .~ key_dtype & opAttr "value_dtype" .~ value_dtype & op'options & opInputs .~ op'inputs) {- output_arg { name: "table_handle" description: "Handle to a table." type: DT_STRING is_ref: true } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this table is shared under the given name across\nmultiple sessions." } attr { name: "use_node_name_sharing" type: "bool" default_value { b: false } description: "If true and shared_name is empty, the table is shared\nusing the node name." } attr { name: "key_dtype" type: "type" description: "Type of the table keys." } attr { name: "value_dtype" type: "type" description: "Type of the table values." } -} -- | Creates an empty hash table. -- -- This op creates a mutable hash table, specifying the type of its keys and -- values. Each value must be a vector. Data can be inserted into the table using -- the insert operations. It does not support the initialization operation. mutableHashTableOfTensors :: forall m' . (MonadBuild m') => DataType -- ^ __key_dtype__: Type of the table keys. -> DataType -- ^ __value_dtype__: Type of the table values. -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __table_handle__: Handle to a table. mutableHashTableOfTensors = mutableHashTableOfTensors' id mutableHashTableOfTensors' :: forall m' . (MonadBuild m') => OpParams -> DataType -- ^ __key_dtype__: Type of the table keys. -> DataType -- ^ __value_dtype__: Type of the table values. -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __table_handle__: Handle to a table. mutableHashTableOfTensors' op'options key_dtype value_dtype | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "MutableHashTableOfTensors" & opAttr "key_dtype" .~ key_dtype & opAttr "value_dtype" .~ value_dtype & op'options & opInputs .~ op'inputs) {- output_arg { name: "table_handle" description: "Handle to a table." type: DT_STRING is_ref: true } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this table is shared under the given name across\nmultiple sessions." } attr { name: "use_node_name_sharing" type: "bool" default_value { b: false } } attr { name: "key_dtype" type: "type" description: "Type of the table keys." } attr { name: "value_dtype" type: "type" description: "Type of the table values." } attr { name: "value_shape" type: "shape" default_value { shape { } } } -} -- | Computes numerical negative value element-wise. -- -- I.e., \\(y = -x\\). neg :: forall v'1 t . 
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ neg = neg' id neg' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ neg' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Neg" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Training via negative sampling. negTrain :: forall v'3 v'4 v'5 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_negative_samples__: Number of negative samples per example. -> Tensor Ref Float -- ^ __w_in__: input word embedding. -> Tensor Ref Float -- ^ __w_out__: output word embedding. -> Tensor v'3 Data.Int.Int32 -- ^ __examples__: A vector of word ids. -> Tensor v'4 Data.Int.Int32 -- ^ __labels__: A vector of word ids. -> Tensor v'5 Float -- ^ __lr__ -> m' (ControlNode) negTrain = negTrain' id negTrain' :: forall v'3 v'4 v'5 m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_negative_samples__: Number of negative samples per example. -> Tensor Ref Float -- ^ __w_in__: input word embedding. -> Tensor Ref Float -- ^ __w_out__: output word embedding. -> Tensor v'3 Data.Int.Int32 -- ^ __examples__: A vector of word ids. -> Tensor v'4 Data.Int.Int32 -- ^ __labels__: A vector of word ids. -> Tensor v'5 Float -- ^ __lr__ -> m' (ControlNode) negTrain' op'options num_negative_samples w_in w_out examples labels lr | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs w_in, buildInputs w_out, buildInputs examples, buildInputs labels, buildInputs lr] buildOp [] (opDef "NegTrain" & opAttr "num_negative_samples" .~ num_negative_samples & op'options & opInputs .~ op'inputs) {- input_arg { name: "w_in" description: "input word embedding." type: DT_FLOAT is_ref: true } input_arg { name: "w_out" description: "output word embedding." type: DT_FLOAT is_ref: true } input_arg { name: "examples" description: "A vector of word ids." type: DT_INT32 } input_arg { name: "labels" description: "A vector of word ids." type: DT_INT32 } input_arg { name: "lr" type: DT_FLOAT } attr { name: "vocab_count" type: "list(int)" description: "Count of words in the vocabulary." } attr { name: "num_negative_samples" type: "int" description: "Number of negative samples per example." } -} -- | Makes its input available to the next iteration. nextIteration :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __data__: The tensor to be made available to the next iteration. -> Tensor Build t -- ^ __output__: The same tensor as `data`. nextIteration = nextIteration' id nextIteration' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __data__: The tensor to be made available to the next iteration. -> Tensor Build t -- ^ __output__: The same tensor as `data`. 
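-- A one-line sketch for `neg`, under the same assumptions as the sketches
-- above (`Ops` qualified import, `constant` from `TensorFlow.Ops`):
--
-- > negated :: Tensor Build Float
-- > negated = Ops.neg (constant (Shape [3]) [1, -2, 3])  -- ==> [-1, 2, -3]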
nextIteration' op'options data' | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data'] return (opDef "NextIteration" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" description: "The tensor to be made available to the next iteration." type_attr: "T" } output_arg { name: "output" description: "The same tensor as `data`." type_attr: "T" } attr { name: "T" type: "type" } -} -- | Does nothing. Only useful as a placeholder for control edges. noOp :: forall m' . (MonadBuild m') => m' (ControlNode) noOp = noOp' id noOp' :: forall m' . (MonadBuild m') => OpParams -> m' (ControlNode) noOp' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "NoOp" & op'options & opInputs .~ op'inputs) {- -} -- | Greedily selects a subset of bounding boxes in descending order of score, -- -- pruning away boxes that have high intersection-over-union (IOU) overlap -- with previously selected boxes. Bounding boxes are supplied as -- [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any -- diagonal pair of box corners and the coordinates can be provided as normalized -- (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm -- is agnostic to where the origin is in the coordinate system. Note that this -- algorithm is invariant to orthogonal transformations and translations -- of the coordinate system; thus translations or reflections of the coordinate -- system result in the same boxes being selected by the algorithm. -- -- The output of this operation is a set of integers indexing into the input -- collection of bounding boxes representing the selected boxes. The bounding -- box coordinates corresponding to the selected indices can then be obtained -- using the `tf.gather` operation. For example: -- -- selected_indices = tf.image.non_max_suppression( -- boxes, scores, max_output_size, iou_threshold) -- selected_boxes = tf.gather(boxes, selected_indices) nonMaxSuppression :: Tensor v'1 Float -- ^ __boxes__: A 2-D float tensor of shape `[num_boxes, 4]`. -> Tensor v'2 Float -- ^ __scores__: A 1-D float tensor of shape `[num_boxes]` representing a single -- score corresponding to each box (each row of boxes). -> Tensor v'3 Data.Int.Int32 -- ^ __max_output_size__: A scalar integer tensor representing the maximum number of -- boxes to be selected by non max suppression. -> Tensor Build Data.Int.Int32 -- ^ __selected_indices__: A 1-D integer tensor of shape `[M]` representing the selected -- indices from the boxes tensor, where `M <= max_output_size`. nonMaxSuppression = nonMaxSuppression' id nonMaxSuppression' :: OpParams -> Tensor v'1 Float -- ^ __boxes__: A 2-D float tensor of shape `[num_boxes, 4]`. -> Tensor v'2 Float -- ^ __scores__: A 1-D float tensor of shape `[num_boxes]` representing a single -- score corresponding to each box (each row of boxes). -> Tensor v'3 Data.Int.Int32 -- ^ __max_output_size__: A scalar integer tensor representing the maximum number of -- boxes to be selected by non max suppression. -> Tensor Build Data.Int.Int32 -- ^ __selected_indices__: A 1-D integer tensor of shape `[M]` representing the selected -- indices from the boxes tensor, where `M <= max_output_size`.
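-- A Haskell analogue of the Python example above, with the same hedged helper
-- assumptions (`Ops` qualified import, `constant` from `TensorFlow.Ops`):
--
-- > selected :: Tensor Build Data.Int.Int32
-- > selected = Ops.nonMaxSuppression boxes scores maxOut
-- >   where
-- >     boxes  = constant (Shape [2, 4]) [0, 0, 1, 1,  0, 0, 0.9, 0.9 :: Float]
-- >     scores = constant (Shape [2]) [0.8, 0.7 :: Float]
-- >     maxOut = constant (Shape []) [10 :: Data.Int.Int32]
--
-- The resulting indices can then be fed to a gather op to recover the selected
-- boxes, mirroring the `tf.gather` step shown above.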
nonMaxSuppression' op'options boxes scores max_output_size | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs boxes, buildInputs scores, buildInputs max_output_size] return (opDef "NonMaxSuppression" & op'options & opInputs .~ op'inputs) {- input_arg { name: "boxes" description: "A 2-D float tensor of shape `[num_boxes, 4]`." type: DT_FLOAT } input_arg { name: "scores" description: "A 1-D float tensor of shape `[num_boxes]` representing a single\nscore corresponding to each box (each row of boxes)." type: DT_FLOAT } input_arg { name: "max_output_size" description: "A scalar integer tensor representing the maximum number of\nboxes to be selected by non max suppression." type: DT_INT32 } output_arg { name: "selected_indices" description: "A 1-D integer tensor of shape `[M]` representing the selected\nindices from the boxes tensor, where `M <= max_output_size`." type: DT_INT32 } attr { name: "iou_threshold" type: "float" default_value { f: 0.5 } description: "A float representing the threshold for deciding whether boxes\noverlap too much with respect to IOU." } -} -- | Returns the truth value of (x != y) element-wise. -- -- *NOTE*: `NotEqual` supports broadcasting. More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) notEqual :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ notEqual = notEqual' id notEqual' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ notEqual' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "NotEqual" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_QUINT8 type: DT_QINT8 type: DT_QINT32 type: DT_STRING type: DT_BOOL type: DT_COMPLEX128 } } } -} -- | Returns a one-hot tensor. -- -- The locations represented by indices in `indices` take value `on_value`, -- while all other locations take value `off_value`. -- -- If the input `indices` is rank `N`, the output will have rank `N+1`. -- The new axis is created at dimension `axis` (default: the new axis is -- appended at the end). -- -- If `indices` is a scalar, the output shape will be a vector of length `depth`.
-- -- If `indices` is a vector of length `features`, the output shape will be: -- ``` -- features x depth if axis == -1 -- depth x features if axis == 0 -- ``` -- -- If `indices` is a matrix (batch) with shape `[batch, features]`, -- the output shape will be: -- ``` -- batch x features x depth if axis == -1 -- batch x depth x features if axis == 1 -- depth x batch x features if axis == 0 -- ``` -- -- -- Examples -- ========= -- -- Suppose that -- -- ``` -- indices = [0, 2, -1, 1] -- depth = 3 -- on_value = 5.0 -- off_value = 0.0 -- axis = -1 -- ``` -- -- Then output is `[4 x 3]`: -- -- ```output = -- [5.0 0.0 0.0] // one_hot(0) -- [0.0 0.0 5.0] // one_hot(2) -- [0.0 0.0 0.0] // one_hot(-1) -- [0.0 5.0 0.0] // one_hot(1) -- ``` -- -- Suppose that -- -- ``` -- indices = [0, 2, -1, 1] -- depth = 3 -- on_value = 0.0 -- off_value = 3.0 -- axis = 0 -- ``` -- -- Then output is `[3 x 4]`: -- -- ```output = -- [0.0 3.0 3.0 3.0] -- [3.0 3.0 3.0 0.0] -- [3.0 3.0 3.0 3.0] -- [3.0 0.0 3.0 3.0] -- // ^ one_hot(0) -- // ^ one_hot(2) -- // ^ one_hot(-1) -- // ^ one_hot(1) -- ``` -- Suppose that -- -- ``` -- indices = [[0, 2], [1, -1]] -- depth = 3 -- on_value = 1.0 -- off_value = 0.0 -- axis = -1 -- ``` -- -- Then output is `[2 x 2 x 3]`: -- -- ```output = -- [ -- [1.0, 0.0, 0.0] // one_hot(0) -- [0.0, 0.0, 1.0] // one_hot(2) -- ][ -- [0.0, 1.0, 0.0] // one_hot(1) -- [0.0, 0.0, 0.0] // one_hot(-1) -- ]``` oneHot :: forall v'1 v'2 v'3 v'4 t tI . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word8] tI) => Tensor v'1 tI -- ^ __indices__: A tensor of indices. -> Tensor v'2 Data.Int.Int32 -- ^ __depth__: A scalar defining the depth of the one hot dimension. -> Tensor v'3 t -- ^ __on_value__: A scalar defining the value to fill in output when `indices[j] = i`. -> Tensor v'4 t -- ^ __off_value__: A scalar defining the value to fill in output when `indices[j] != i`. -> Tensor Build t -- ^ __output__: The one-hot tensor. oneHot = oneHot' id oneHot' :: forall v'1 v'2 v'3 v'4 t tI . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word8] tI) => OpParams -> Tensor v'1 tI -- ^ __indices__: A tensor of indices. -> Tensor v'2 Data.Int.Int32 -- ^ __depth__: A scalar defining the depth of the one hot dimension. -> Tensor v'3 t -- ^ __on_value__: A scalar defining the value to fill in output when `indices[j] = i`. -> Tensor v'4 t -- ^ __off_value__: A scalar defining the value to fill in output when `indices[j] != i`. -> Tensor Build t -- ^ __output__: The one-hot tensor. oneHot' op'options indices depth on_value off_value | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices, buildInputs depth, buildInputs on_value, buildInputs off_value] return (opDef "OneHot" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "TI" .~ tensorType (undefined :: tI) & op'options & opInputs .~ op'inputs) {- input_arg { name: "indices" description: "A tensor of indices." type_attr: "TI" } input_arg { name: "depth" description: "A scalar defining the depth of the one hot dimension." type: DT_INT32 } input_arg { name: "on_value" description: "A scalar defining the value to fill in output when `indices[j] = i`." type_attr: "T" } input_arg { name: "off_value" description: "A scalar defining the value to fill in output when `indices[j] != i`." type_attr: "T" } output_arg { name: "output" description: "The one-hot tensor." 
type_attr: "T" } attr { name: "axis" type: "int" default_value { i: -1 } description: "The axis to fill (default: -1, a new inner-most axis)." } attr { name: "T" type: "type" } attr { name: "TI" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_UINT8 type: DT_INT32 type: DT_INT64 } } } -} -- | Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor. -- -- Packs the `N` tensors in `values` into a tensor with rank one higher than each -- tensor in `values`, by packing them along the `axis` dimension. -- Given a list of tensors of shape `(A, B, C)`; -- -- if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`. -- if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`. -- Etc. -- -- For example: -- -- ```prettyprint -- # 'x' is [1, 4] -- # 'y' is [2, 5] -- # 'z' is [3, 6] -- pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. -- pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]] -- ``` -- -- This is the opposite of `unpack`. pack :: forall v'1 t . (TensorType t) => [Tensor v'1 t] -- ^ __values__: Must be of same shape and type. -> Tensor Build t -- ^ __output__: The packed tensor. pack = pack' id pack' :: forall v'1 t . (TensorType t) => OpParams -> [Tensor v'1 t] -- ^ __values__: Must be of same shape and type. -> Tensor Build t -- ^ __output__: The packed tensor. pack' op'options values | eqLengthGuard [("N", [("values", length values)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs values] return (opDef "Pack" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length values) :: Int64 {- input_arg { name: "values" description: "Must be of same shape and type." type_attr: "T" number_attr: "N" } output_arg { name: "output" description: "The packed tensor." type_attr: "T" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } attr { name: "axis" type: "int" default_value { i: 0 } description: "Dimension along which to pack. Negative values wrap around, so the\nvalid range is `[-(R+1), R+1)`." } -} -- | Pads a tensor with zeros. -- -- This operation pads a `input` with zeros according to the `paddings` you -- specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the -- rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates -- how many zeros to add before the contents of `input` in that dimension, and -- `paddings[D, 1]` indicates how many zeros to add after the contents of `input` -- in that dimension. -- -- The padded size of each dimension D of the output is: -- -- `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` -- -- For example: -- -- ```prettyprint -- # 't' is [[1, 1], [2, 2]] -- # 'paddings' is [[1, 1], [2, 2]] -- # rank of 't' is 2 -- pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0] -- [0, 0, 1, 1, 0, 0] -- [0, 0, 2, 2, 0, 0] -- [0, 0, 0, 0, 0, 0]] -- ``` pad :: forall v'1 v'2 t tpaddings . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tpaddings) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 tpaddings -- ^ __paddings__ -> Tensor Build t -- ^ __output__ pad = pad' id pad' :: forall v'1 v'2 t tpaddings . 
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tpaddings) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tpaddings -- ^ __paddings__ -> Tensor Build t -- ^ __output__ pad' op'options input paddings | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs paddings] return (opDef "Pad" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "paddings" type_attr: "Tpaddings" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tpaddings" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | A queue that produces elements in first-in first-out order. -- -- Variable-size shapes are allowed by setting the corresponding shape dimensions -- to 0 in the shape attr. In this case DequeueMany will pad up to the maximum -- size of any given element in the minibatch. See below for details. paddingFIFOQueue :: forall m' . (MonadBuild m') => [DataType] -- ^ __component_types__: The type of each component in a value. -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the queue. paddingFIFOQueue = paddingFIFOQueue' id paddingFIFOQueue' :: forall m' . (MonadBuild m') => OpParams -> [DataType] -- ^ __component_types__: The type of each component in a value. -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the queue. paddingFIFOQueue' op'options component_types | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "PaddingFIFOQueue" & opAttr "component_types" .~ component_types & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" description: "The handle to the queue." type: DT_STRING is_ref: true } attr { name: "component_types" type: "list(type)" description: "The type of each component in a value." has_minimum: true minimum: 1 } attr { name: "shapes" type: "list(shape)" default_value { list { } } description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types.\nShapes of fixed rank but variable size are allowed by setting\nany shape dimension to -1. In this case, the inputs\' shape may vary along\nthe given dimension, and DequeueMany will pad the given dimension with\nzeros up to the maximum shape of all elements in the given batch.\nIf the length of this attr is 0, different queue elements may have\ndifferent ranks and shapes, but only one element may be dequeued at a time." has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions." } -} -- | A queue that produces elements in first-in first-out order. -- -- Variable-size shapes are allowed by setting the corresponding shape dimensions -- to 0 in the shape attr. In this case DequeueMany will pad up to the maximum -- size of any given element in the minibatch. 
See below for details. paddingFIFOQueueV2 :: forall m' . (MonadBuild m') => [DataType] -- ^ __component_types__: The type of each component in a value. -> m' (Tensor Value ResourceHandle) -- ^ __handle__: The handle to the queue. paddingFIFOQueueV2 = paddingFIFOQueueV2' id paddingFIFOQueueV2' :: forall m' . (MonadBuild m') => OpParams -> [DataType] -- ^ __component_types__: The type of each component in a value. -> m' (Tensor Value ResourceHandle) -- ^ __handle__: The handle to the queue. paddingFIFOQueueV2' op'options component_types | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "PaddingFIFOQueueV2" & opAttr "component_types" .~ component_types & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" description: "The handle to the queue." type: DT_RESOURCE } attr { name: "component_types" type: "list(type)" description: "The type of each component in a value." has_minimum: true minimum: 1 } attr { name: "shapes" type: "list(shape)" default_value { list { } } description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types.\nShapes of fixed rank but variable size are allowed by setting\nany shape dimension to -1. In this case, the inputs\' shape may vary along\nthe given dimension, and DequeueMany will pad the given dimension with\nzeros up to the maximum shape of all elements in the given batch.\nIf the length of this attr is 0, different queue elements may have\ndifferent ranks and shapes, but only one element may be dequeued at a time." has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions." } -} -- | Concatenates a list of `N` tensors along the first dimension. -- -- The input tensors are all required to have size 1 in the first dimension. -- -- For example: -- -- ```prettyprint -- # 'x' is [[1, 4]] -- # 'y' is [[2, 5]] -- # 'z' is [[3, 6]] -- parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. -- ``` -- -- The difference between concat and parallel_concat is that concat requires all -- of the inputs to be computed before the operation will begin but doesn't require -- that the input shapes be known during graph construction. Parallel concat -- will copy pieces of the input into the output as they become available; in -- some situations this can provide a performance benefit. parallelConcat :: forall v'1 t . (TensorType t) => Shape -- ^ __shape__: the final shape of the result; should be equal to the shapes of any input -- but with the number of input values in the first dimension. -> [Tensor v'1 t] -- ^ __values__: Tensors to be concatenated. All must have size 1 in the first dimension -- and same shape. -> Tensor Build t -- ^ __output__: The concatenated tensor. parallelConcat = parallelConcat' id parallelConcat' :: forall v'1 t . (TensorType t) => OpParams -> Shape -- ^ __shape__: the final shape of the result; should be equal to the shapes of any input -- but with the number of input values in the first dimension.
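-- A sketch of `parallelConcat`, matching the prettyprint example above (same
-- assumptions). Note that the `Shape` argument is the full output shape, with
-- the number of inputs in the first dimension:
--
-- > pconcat :: Tensor Build Float
-- > pconcat = Ops.parallelConcat (Shape [3, 2])
-- >     [ constant (Shape [1, 2]) [1, 4]
-- >     , constant (Shape [1, 2]) [2, 5]
-- >     , constant (Shape [1, 2]) [3, 6] ]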
-> [Tensor v'1 t] -- ^ __values__: Tensors to be concatenated. All must have size 1 in the first dimension -- and same shape. -> Tensor Build t -- ^ __output__: The concatenated tensor. parallelConcat' op'options shape values | eqLengthGuard [("N", [("values", length values)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs values] return (opDef "ParallelConcat" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "shape" .~ shape & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length values) :: Int64 {- input_arg { name: "values" description: "Tensors to be concatenated. All must have size 1 in the first dimension\nand same shape." type_attr: "T" number_attr: "N" } output_arg { name: "output" description: "The concatenated tensor." type_attr: "T" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } attr { name: "shape" type: "shape" description: "the final shape of the result; should be equal to the shapes of any input\nbut with the number of input values in the first dimension." } -} -- | Outputs random values from a normal distribution. The parameters may each be a -- -- scalar which applies to the entire output, or a vector of length shape[0] which -- stores the parameters for each batch. parameterizedTruncatedNormal :: forall v'1 v'2 v'3 v'4 v'5 dtype t m' . (MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Tensor v'1 t -- ^ __shape__: The shape of the output tensor. Batches are indexed by the 0th dimension. -> Tensor v'2 dtype -- ^ __means__: The mean parameter of each batch. -> Tensor v'3 dtype -- ^ __stdevs__: The standard deviation parameter of each batch. Must be greater than 0. -> Tensor v'4 dtype -- ^ __minvals__: The minimum cutoff. May be -infinity. -> Tensor v'5 dtype -- ^ __maxvals__: The maximum cutoff. May be +infinity, and must be more than the minval -- for each batch. -> m' (Tensor Value dtype) -- ^ __output__: A matrix of shape num_batches x samples_per_batch, filled with random -- truncated normal values using the parameters for each row. parameterizedTruncatedNormal = parameterizedTruncatedNormal' id parameterizedTruncatedNormal' :: forall v'1 v'2 v'3 v'4 v'5 dtype t m' . (MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Tensor v'1 t -- ^ __shape__: The shape of the output tensor. Batches are indexed by the 0th dimension. -> Tensor v'2 dtype -- ^ __means__: The mean parameter of each batch. -> Tensor v'3 dtype -- ^ __stdevs__: The standard deviation parameter of each batch. Must be greater than 0. -> Tensor v'4 dtype -- ^ __minvals__: The minimum cutoff. May be -infinity. -> Tensor v'5 dtype -- ^ __maxvals__: The maximum cutoff. May be +infinity, and must be more than the minval -- for each batch. -> m' (Tensor Value dtype) -- ^ __output__: A matrix of shape num_batches x samples_per_batch, filled with random -- truncated normal values using the parameters for each row. 
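-- A sketch of drawing parameterized truncated normals (same assumptions as the
-- sketches above). The op is stateful, so it runs in any `MonadBuild` monad;
-- scalar parameters apply to the entire output:
--
-- > samples :: MonadBuild m => m (Tensor Value Float)
-- > samples = Ops.parameterizedTruncatedNormal
-- >     (constant (Shape [2]) [4, 10 :: Data.Int.Int32])  -- 4 batches of 10 samples
-- >     (constant (Shape []) [0])   -- mean
-- >     (constant (Shape []) [1])   -- stdev, must be > 0
-- >     (constant (Shape []) [-2])  -- lower cutoff
-- >     (constant (Shape []) [2])   -- upper cutoff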
parameterizedTruncatedNormal' op'options shape means stdevs minvals maxvals | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape, buildInputs means, buildInputs stdevs, buildInputs minvals, buildInputs maxvals] buildOp [] (opDef "ParameterizedTruncatedNormal" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "shape" description: "The shape of the output tensor. Batches are indexed by the 0th dimension." type_attr: "T" } input_arg { name: "means" description: "The mean parameter of each batch." type_attr: "dtype" } input_arg { name: "stdevs" description: "The standard deviation parameter of each batch. Must be greater than 0." type_attr: "dtype" } input_arg { name: "minvals" description: "The minimum cutoff. May be -infinity." type_attr: "dtype" } input_arg { name: "maxvals" description: "The maximum cutoff. May be +infinity, and must be more than the minval\nfor each batch." type_attr: "dtype" } output_arg { name: "output" description: "A matrix of shape num_batches x samples_per_batch, filled with random\ntruncated normal values using the parameters for each row." type_attr: "dtype" } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } attr { name: "dtype" type: "type" description: "The type of the output." allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Transforms a vector of brain.Example protos (as strings) into typed tensors. parseExample :: forall v'1 v'2 v'3 v'4 v'5 sparse_types tdense . (OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] sparse_types, OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] tdense) => Tensor v'1 Data.ByteString.ByteString -- ^ __serialized__: A vector containing a batch of binary serialized Example protos. -> Tensor v'2 Data.ByteString.ByteString -- ^ __names__: A vector containing the names of the serialized protos. -- May contain, for example, table key (descriptive) names for the -- corresponding serialized protos. These are purely useful for debugging -- purposes, and the presence of values here has no effect on the output. -- May also be an empty vector if no names are available. -- If non-empty, this vector must be the same length as "serialized". -> [Tensor v'3 Data.ByteString.ByteString] -- ^ __sparse_keys__: A list of Nsparse string Tensors (scalars). -- The keys expected in the Examples' features associated with sparse values. -> [Tensor v'4 Data.ByteString.ByteString] -- ^ __dense_keys__: A list of Ndense string Tensors (scalars). -- The keys expected in the Examples' features associated with dense values. -> TensorList (v'5) tdense -- ^ __dense_defaults__: A list of Ndense Tensors (some may be empty). -- dense_defaults[j] provides default values -- when the example's feature_map lacks dense_key[j]. If an empty Tensor is -- provided for dense_defaults[j], then the Feature dense_keys[j] is required. -- The input type is inferred from dense_defaults[j], even when it's empty. 
-- If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, -- then the shape of dense_defaults[j] must match that of dense_shapes[j]. -- If dense_shapes[j] has an undefined major dimension (variable strides dense -- feature), dense_defaults[j] must contain a single element: -- the padding element. -> ([Tensor Build Data.Int.Int64], TensorList (Build) sparse_types, [Tensor Build Data.Int.Int64], TensorList (Build) tdense) -- ^ (__sparse_indices__, __sparse_values__, __sparse_shapes__, __dense_values__) -- -- * __sparse_indices__ -- -- * __sparse_values__ -- -- * __sparse_shapes__ -- -- * __dense_values__ parseExample = parseExample' id parseExample' :: forall v'1 v'2 v'3 v'4 v'5 sparse_types tdense . (OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] sparse_types, OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] tdense) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __serialized__: A vector containing a batch of binary serialized Example protos. -> Tensor v'2 Data.ByteString.ByteString -- ^ __names__: A vector containing the names of the serialized protos. -- May contain, for example, table key (descriptive) names for the -- corresponding serialized protos. These are purely useful for debugging -- purposes, and the presence of values here has no effect on the output. -- May also be an empty vector if no names are available. -- If non-empty, this vector must be the same length as "serialized". -> [Tensor v'3 Data.ByteString.ByteString] -- ^ __sparse_keys__: A list of Nsparse string Tensors (scalars). -- The keys expected in the Examples' features associated with sparse values. -> [Tensor v'4 Data.ByteString.ByteString] -- ^ __dense_keys__: A list of Ndense string Tensors (scalars). -- The keys expected in the Examples' features associated with dense values. -> TensorList (v'5) tdense -- ^ __dense_defaults__: A list of Ndense Tensors (some may be empty). -- dense_defaults[j] provides default values -- when the example's feature_map lacks dense_key[j]. If an empty Tensor is -- provided for dense_defaults[j], then the Feature dense_keys[j] is required. -- The input type is inferred from dense_defaults[j], even when it's empty. -- If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, -- then the shape of dense_defaults[j] must match that of dense_shapes[j]. -- If dense_shapes[j] has an undefined major dimension (variable strides dense -- feature), dense_defaults[j] must contain a single element: -- the padding element. 
-> ([Tensor Build Data.Int.Int64], TensorList (Build) sparse_types, [Tensor Build Data.Int.Int64], TensorList (Build) tdense) -- ^ (__sparse_indices__, __sparse_values__, __sparse_shapes__, __dense_values__) -- -- * __sparse_indices__ -- -- * __sparse_values__ -- -- * __sparse_shapes__ -- -- * __dense_values__ parseExample' op'options serialized names sparse_keys dense_keys dense_defaults | eqLengthGuard [("Nsparse", [("sparse_keys", length sparse_keys)]), ("Ndense", [("dense_keys", length dense_keys)])] = pureOp [nsparse, nsparse] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs serialized, buildInputs names, buildInputs sparse_keys, buildInputs dense_keys, buildInputs dense_defaults] return (opDef "ParseExample" & opAttr "sparse_types" .~ fromTensorTypes (Proxy :: Proxy sparse_types) & opAttr "Tdense" .~ fromTensorTypes (Proxy :: Proxy tdense) & opAttr "Nsparse" .~ nsparse & opAttr "Ndense" .~ ndense & op'options & opInputs .~ op'inputs) where nsparse = fromIntegral (length sparse_keys) :: Int64 ndense = fromIntegral (length dense_keys) :: Int64 {- input_arg { name: "serialized" description: "A vector containing a batch of binary serialized Example protos." type: DT_STRING } input_arg { name: "names" description: "A vector containing the names of the serialized protos.\nMay contain, for example, table key (descriptive) names for the\ncorresponding serialized protos. These are purely useful for debugging\npurposes, and the presence of values here has no effect on the output.\nMay also be an empty vector if no names are available.\nIf non-empty, this vector must be the same length as \"serialized\"." type: DT_STRING } input_arg { name: "sparse_keys" description: "A list of Nsparse string Tensors (scalars).\nThe keys expected in the Examples\' features associated with sparse values." type: DT_STRING number_attr: "Nsparse" } input_arg { name: "dense_keys" description: "A list of Ndense string Tensors (scalars).\nThe keys expected in the Examples\' features associated with dense values." type: DT_STRING number_attr: "Ndense" } input_arg { name: "dense_defaults" description: "A list of Ndense Tensors (some may be empty).\ndense_defaults[j] provides default values\nwhen the example\'s feature_map lacks dense_key[j]. If an empty Tensor is\nprovided for dense_defaults[j], then the Feature dense_keys[j] is required.\nThe input type is inferred from dense_defaults[j], even when it\'s empty.\nIf dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,\nthen the shape of dense_defaults[j] must match that of dense_shapes[j].\nIf dense_shapes[j] has an undefined major dimension (variable strides dense\nfeature), dense_defaults[j] must contain a single element:\nthe padding element." type_list_attr: "Tdense" } output_arg { name: "sparse_indices" type: DT_INT64 number_attr: "Nsparse" } output_arg { name: "sparse_values" type_list_attr: "sparse_types" } output_arg { name: "sparse_shapes" type: DT_INT64 number_attr: "Nsparse" } output_arg { name: "dense_values" type_list_attr: "Tdense" } attr { name: "Nsparse" type: "int" has_minimum: true } attr { name: "Ndense" type: "int" has_minimum: true } attr { name: "sparse_types" type: "list(type)" description: "A list of Nsparse types; the data types of data in each Feature\ngiven in sparse_keys.\nCurrently the ParseExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList)." 
has_minimum: true allowed_values { list { type: DT_FLOAT type: DT_INT64 type: DT_STRING } } } attr { name: "Tdense" type: "list(type)" has_minimum: true allowed_values { list { type: DT_FLOAT type: DT_INT64 type: DT_STRING } } } attr { name: "dense_shapes" type: "list(shape)" description: "A list of Ndense shapes; the shapes of data in each Feature\ngiven in dense_keys.\nThe number of elements in the Feature corresponding to dense_key[j]\nmust always equal dense_shapes[j].NumEntries().\nIf dense_shapes[j] == (D0, D1, ..., DN) then the shape of output\nTensor dense_values[j] will be (|serialized|, D0, D1, ..., DN):\nThe dense outputs are just the inputs row-stacked by batch.\nThis works for dense_shapes[j] = (-1, D1, ..., DN). In this case\nthe shape of the output Tensor dense_values[j] will be\n(|serialized|, M, D1, .., DN), where M is the maximum number of blocks\nof elements of length D1 * .... * DN, across all minibatch entries\nin the input. Any minibatch entry with less than M blocks of elements of\nlength D1 * ... * DN will be padded with the corresponding default_value\nscalar element along the second dimension." has_minimum: true } -} -- | Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors. parseSingleSequenceExample :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 context_sparse_types tcontext_dense feature_list_dense_types feature_list_sparse_types . (OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] context_sparse_types, OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] tcontext_dense, OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] feature_list_dense_types, OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] feature_list_sparse_types) => Tensor v'1 Data.ByteString.ByteString -- ^ __serialized__: A scalar containing a binary serialized SequenceExample proto. -> Tensor v'2 Data.ByteString.ByteString -- ^ __feature_list_dense_missing_assumed_empty__: A vector listing the -- FeatureList keys which may be missing from the SequenceExample. If the -- associated FeatureList is missing, it is treated as empty. By default, -- any FeatureList not listed in this vector must exist in the SequenceExample. -> [Tensor v'3 Data.ByteString.ByteString] -- ^ __context_sparse_keys__: A list of Ncontext_sparse string Tensors (scalars). -- The keys expected in the Examples' features associated with context_sparse -- values. -> [Tensor v'4 Data.ByteString.ByteString] -- ^ __context_dense_keys__: A list of Ncontext_dense string Tensors (scalars). -- The keys expected in the SequenceExamples' context features associated with -- dense values. -> [Tensor v'5 Data.ByteString.ByteString] -- ^ __feature_list_sparse_keys__: A list of Nfeature_list_sparse string Tensors -- (scalars). The keys expected in the FeatureLists associated with sparse -- values. -> [Tensor v'6 Data.ByteString.ByteString] -- ^ __feature_list_dense_keys__: A list of Nfeature_list_dense string Tensors (scalars). -- The keys expected in the SequenceExamples' feature_lists associated -- with lists of dense values. -> TensorList (v'7) tcontext_dense -- ^ __context_dense_defaults__: A list of Ncontext_dense Tensors (some may be empty). -- context_dense_defaults[j] provides default values -- when the SequenceExample's context map lacks context_dense_key[j]. -- If an empty Tensor is provided for context_dense_defaults[j], -- then the Feature context_dense_keys[j] is required. -- The input type is inferred from context_dense_defaults[j], even when it's -- empty. 
If context_dense_defaults[j] is not empty, its shape must match -- context_dense_shapes[j]. -> Tensor v'8 Data.ByteString.ByteString -- ^ __debug_name__: A scalar containing the name of the serialized proto. -- May contain, for example, table key (descriptive) name for the -- corresponding serialized proto. This is purely useful for debugging -- purposes, and the presence of values here has no effect on the output. -- May also be an empty scalar if no name is available. -> ([Tensor Build Data.Int.Int64], TensorList (Build) context_sparse_types, [Tensor Build Data.Int.Int64], TensorList (Build) tcontext_dense, [Tensor Build Data.Int.Int64], TensorList (Build) feature_list_sparse_types, [Tensor Build Data.Int.Int64], TensorList (Build) feature_list_dense_types) -- ^ (__context_sparse_indices__, __context_sparse_values__, __context_sparse_shapes__, __context_dense_values__, __feature_list_sparse_indices__, __feature_list_sparse_values__, __feature_list_sparse_shapes__, __feature_list_dense_values__) -- -- * __context_sparse_indices__ -- -- * __context_sparse_values__ -- -- * __context_sparse_shapes__ -- -- * __context_dense_values__ -- -- * __feature_list_sparse_indices__ -- -- * __feature_list_sparse_values__ -- -- * __feature_list_sparse_shapes__ -- -- * __feature_list_dense_values__ parseSingleSequenceExample = parseSingleSequenceExample' id parseSingleSequenceExample' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 context_sparse_types tcontext_dense feature_list_dense_types feature_list_sparse_types . (OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] context_sparse_types, OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] tcontext_dense, OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] feature_list_dense_types, OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] feature_list_sparse_types) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __serialized__: A scalar containing a binary serialized SequenceExample proto. -> Tensor v'2 Data.ByteString.ByteString -- ^ __feature_list_dense_missing_assumed_empty__: A vector listing the -- FeatureList keys which may be missing from the SequenceExample. If the -- associated FeatureList is missing, it is treated as empty. By default, -- any FeatureList not listed in this vector must exist in the SequenceExample. -> [Tensor v'3 Data.ByteString.ByteString] -- ^ __context_sparse_keys__: A list of Ncontext_sparse string Tensors (scalars). -- The keys expected in the Examples' features associated with context_sparse -- values. -> [Tensor v'4 Data.ByteString.ByteString] -- ^ __context_dense_keys__: A list of Ncontext_dense string Tensors (scalars). -- The keys expected in the SequenceExamples' context features associated with -- dense values. -> [Tensor v'5 Data.ByteString.ByteString] -- ^ __feature_list_sparse_keys__: A list of Nfeature_list_sparse string Tensors -- (scalars). The keys expected in the FeatureLists associated with sparse -- values. -> [Tensor v'6 Data.ByteString.ByteString] -- ^ __feature_list_dense_keys__: A list of Nfeature_list_dense string Tensors (scalars). -- The keys expected in the SequenceExamples' feature_lists associated -- with lists of dense values. -> TensorList (v'7) tcontext_dense -- ^ __context_dense_defaults__: A list of Ncontext_dense Tensors (some may be empty). -- context_dense_defaults[j] provides default values -- when the SequenceExample's context map lacks context_dense_key[j]. 
-- If an empty Tensor is provided for context_dense_defaults[j], -- then the Feature context_dense_keys[j] is required. -- The input type is inferred from context_dense_defaults[j], even when it's -- empty. If context_dense_defaults[j] is not empty, its shape must match -- context_dense_shapes[j]. -> Tensor v'8 Data.ByteString.ByteString -- ^ __debug_name__: A scalar containing the name of the serialized proto. -- May contain, for example, table key (descriptive) name for the -- corresponding serialized proto. This is purely useful for debugging -- purposes, and the presence of values here has no effect on the output. -- May also be an empty scalar if no name is available. -> ([Tensor Build Data.Int.Int64], TensorList (Build) context_sparse_types, [Tensor Build Data.Int.Int64], TensorList (Build) tcontext_dense, [Tensor Build Data.Int.Int64], TensorList (Build) feature_list_sparse_types, [Tensor Build Data.Int.Int64], TensorList (Build) feature_list_dense_types) -- ^ (__context_sparse_indices__, __context_sparse_values__, __context_sparse_shapes__, __context_dense_values__, __feature_list_sparse_indices__, __feature_list_sparse_values__, __feature_list_sparse_shapes__, __feature_list_dense_values__) -- -- * __context_sparse_indices__ -- -- * __context_sparse_values__ -- -- * __context_sparse_shapes__ -- -- * __context_dense_values__ -- -- * __feature_list_sparse_indices__ -- -- * __feature_list_sparse_values__ -- -- * __feature_list_sparse_shapes__ -- -- * __feature_list_dense_values__ parseSingleSequenceExample' op'options serialized feature_list_dense_missing_assumed_empty context_sparse_keys context_dense_keys feature_list_sparse_keys feature_list_dense_keys context_dense_defaults debug_name | eqLengthGuard [("Ncontext_sparse", [("context_sparse_keys", length context_sparse_keys)]), ("Ncontext_dense", [("context_dense_keys", length context_dense_keys)]), ("Nfeature_list_sparse", [("feature_list_sparse_keys", length feature_list_sparse_keys)]), ("Nfeature_list_dense", [("feature_list_dense_keys", length feature_list_dense_keys)])] = pureOp [ncontext_sparse, ncontext_sparse, nfeature_list_sparse, nfeature_list_sparse] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs serialized, buildInputs feature_list_dense_missing_assumed_empty, buildInputs context_sparse_keys, buildInputs context_dense_keys, buildInputs feature_list_sparse_keys, buildInputs feature_list_dense_keys, buildInputs context_dense_defaults, buildInputs debug_name] return (opDef "ParseSingleSequenceExample" & opAttr "context_sparse_types" .~ fromTensorTypes (Proxy :: Proxy context_sparse_types) & opAttr "Tcontext_dense" .~ fromTensorTypes (Proxy :: Proxy tcontext_dense) & opAttr "feature_list_dense_types" .~ fromTensorTypes (Proxy :: Proxy feature_list_dense_types) & opAttr "feature_list_sparse_types" .~ fromTensorTypes (Proxy :: Proxy feature_list_sparse_types) & opAttr "Ncontext_sparse" .~ ncontext_sparse & opAttr "Ncontext_dense" .~ ncontext_dense & opAttr "Nfeature_list_sparse" .~ nfeature_list_sparse & opAttr "Nfeature_list_dense" .~ nfeature_list_dense & op'options & opInputs .~ op'inputs) where ncontext_sparse = fromIntegral (length context_sparse_keys) :: Int64 ncontext_dense = fromIntegral (length context_dense_keys) :: Int64 nfeature_list_sparse = fromIntegral (length feature_list_sparse_keys) :: Int64 nfeature_list_dense = fromIntegral (length feature_list_dense_keys) :: Int64 {- input_arg { name: "serialized" description: "A scalar containing a binary serialized SequenceExample proto." 
type: DT_STRING } input_arg { name: "feature_list_dense_missing_assumed_empty" description: "A vector listing the\nFeatureList keys which may be missing from the SequenceExample. If the\nassociated FeatureList is missing, it is treated as empty. By default,\nany FeatureList not listed in this vector must exist in the SequenceExample." type: DT_STRING } input_arg { name: "context_sparse_keys" description: "A list of Ncontext_sparse string Tensors (scalars).\nThe keys expected in the Examples\' features associated with context_sparse\nvalues." type: DT_STRING number_attr: "Ncontext_sparse" } input_arg { name: "context_dense_keys" description: "A list of Ncontext_dense string Tensors (scalars).\nThe keys expected in the SequenceExamples\' context features associated with\ndense values." type: DT_STRING number_attr: "Ncontext_dense" } input_arg { name: "feature_list_sparse_keys" description: "A list of Nfeature_list_sparse string Tensors\n(scalars). The keys expected in the FeatureLists associated with sparse\nvalues." type: DT_STRING number_attr: "Nfeature_list_sparse" } input_arg { name: "feature_list_dense_keys" description: "A list of Nfeature_list_dense string Tensors (scalars).\nThe keys expected in the SequenceExamples\' feature_lists associated\nwith lists of dense values." type: DT_STRING number_attr: "Nfeature_list_dense" } input_arg { name: "context_dense_defaults" description: "A list of Ncontext_dense Tensors (some may be empty).\ncontext_dense_defaults[j] provides default values\nwhen the SequenceExample\'s context map lacks context_dense_key[j].\nIf an empty Tensor is provided for context_dense_defaults[j],\nthen the Feature context_dense_keys[j] is required.\nThe input type is inferred from context_dense_defaults[j], even when it\'s\nempty. If context_dense_defaults[j] is not empty, its shape must match\ncontext_dense_shapes[j]." type_list_attr: "Tcontext_dense" } input_arg { name: "debug_name" description: "A scalar containing the name of the serialized proto.\nMay contain, for example, table key (descriptive) name for the\ncorresponding serialized proto. This is purely useful for debugging\npurposes, and the presence of values here has no effect on the output.\nMay also be an empty scalar if no name is available." 
type: DT_STRING } output_arg { name: "context_sparse_indices" type: DT_INT64 number_attr: "Ncontext_sparse" } output_arg { name: "context_sparse_values" type_list_attr: "context_sparse_types" } output_arg { name: "context_sparse_shapes" type: DT_INT64 number_attr: "Ncontext_sparse" } output_arg { name: "context_dense_values" type_list_attr: "Tcontext_dense" } output_arg { name: "feature_list_sparse_indices" type: DT_INT64 number_attr: "Nfeature_list_sparse" } output_arg { name: "feature_list_sparse_values" type_list_attr: "feature_list_sparse_types" } output_arg { name: "feature_list_sparse_shapes" type: DT_INT64 number_attr: "Nfeature_list_sparse" } output_arg { name: "feature_list_dense_values" type_list_attr: "feature_list_dense_types" } attr { name: "Ncontext_sparse" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "Ncontext_dense" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "Nfeature_list_sparse" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "Nfeature_list_dense" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "context_sparse_types" type: "list(type)" default_value { list { } } description: "A list of Ncontext_sparse types; the data types of data in\neach context Feature given in context_sparse_keys.\nCurrently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList)." has_minimum: true allowed_values { list { type: DT_FLOAT type: DT_INT64 type: DT_STRING } } } attr { name: "Tcontext_dense" type: "list(type)" default_value { list { } } has_minimum: true allowed_values { list { type: DT_FLOAT type: DT_INT64 type: DT_STRING } } } attr { name: "feature_list_dense_types" type: "list(type)" default_value { list { } } has_minimum: true allowed_values { list { type: DT_FLOAT type: DT_INT64 type: DT_STRING } } } attr { name: "context_dense_shapes" type: "list(shape)" default_value { list { } } description: "A list of Ncontext_dense shapes; the shapes of data in\neach context Feature given in context_dense_keys.\nThe number of elements in the Feature corresponding to context_dense_key[j]\nmust always equal context_dense_shapes[j].NumEntries().\nThe shape of context_dense_values[j] will match context_dense_shapes[j]." has_minimum: true } attr { name: "feature_list_sparse_types" type: "list(type)" default_value { list { } } description: "A list of Nfeature_list_sparse types; the data types\nof data in each FeatureList given in feature_list_sparse_keys.\nCurrently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList)." has_minimum: true allowed_values { list { type: DT_FLOAT type: DT_INT64 type: DT_STRING } } } attr { name: "feature_list_dense_shapes" type: "list(shape)" default_value { list { } } description: "A list of Nfeature_list_dense shapes; the shapes of\ndata in each FeatureList given in feature_list_dense_keys.\nThe shape of each Feature in the FeatureList corresponding to\nfeature_list_dense_key[j] must always equal\nfeature_list_dense_shapes[j].NumEntries()." has_minimum: true } -} -- | Transforms a serialized tensorflow.TensorProto proto into a Tensor. parseTensor :: forall v'1 out_type . (TensorType out_type) => Tensor v'1 Data.ByteString.ByteString -- ^ __serialized__: A scalar string containing a serialized TensorProto proto. -> Tensor Build out_type -- ^ __output__: A Tensor of type `out_type`. parseTensor = parseTensor' id parseTensor' :: forall v'1 out_type . 
(TensorType out_type) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __serialized__: A scalar string containing a serialized TensorProto proto. -> Tensor Build out_type -- ^ __output__: A Tensor of type `out_type`. parseTensor' op'options serialized | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs serialized] return (opDef "ParseTensor" & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "serialized" description: "A scalar string containing a serialized TensorProto proto." type: DT_STRING } output_arg { name: "output" description: "A Tensor of type `out_type`." type_attr: "out_type" } attr { name: "out_type" type: "type" description: "The type of the serialized tensor. The provided type must match the\ntype of the serialized tensor and no implicit conversion will take place." } -} -- | A placeholder op for a value that will be fed into the computation. -- -- N.B. This operation will fail with an error if it is executed. It is -- intended as a way to represent a value that will always be fed, and to -- provide attrs that enable the fed value to be checked at runtime. placeholder :: forall dtype . (TensorType dtype) => Tensor Build dtype -- ^ __output__: A placeholder tensor that must be replaced using the feed mechanism. placeholder = placeholder' id placeholder' :: forall dtype . (TensorType dtype) => OpParams -> Tensor Build dtype -- ^ __output__: A placeholder tensor that must be replaced using the feed mechanism. placeholder' op'options | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] return (opDef "Placeholder" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- output_arg { name: "output" description: "A placeholder tensor that must be replaced using the feed mechanism." type_attr: "dtype" } attr { name: "dtype" type: "type" description: "The type of elements in the tensor." } attr { name: "shape" type: "shape" default_value { shape { } } description: "(Optional) The shape of the tensor. If the shape has 0 dimensions, the\nshape is unconstrained." } -} -- | A placeholder op for a value that will be fed into the computation. -- -- N.B. This operation will fail with an error if it is executed. It is -- intended as a way to represent a value that will always be fed, and to -- provide attrs that enable the fed value to be checked at runtime. placeholderV2 :: forall dtype . (TensorType dtype) => Shape -- ^ __shape__: The shape of the tensor. The shape can be any partially-specified -- shape. To be unconstrained, pass in a shape with unknown rank. -> Tensor Build dtype -- ^ __output__: A placeholder tensor that must be replaced using the feed mechanism. placeholderV2 = placeholderV2' id placeholderV2' :: forall dtype . (TensorType dtype) => OpParams -> Shape -- ^ __shape__: The shape of the tensor. The shape can be any partially-specified -- shape. To be unconstrained, pass in a shape with unknown rank. -> Tensor Build dtype -- ^ __output__: A placeholder tensor that must be replaced using the feed mechanism. 
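-- A minimal feeding sketch for 'placeholder' (an illustrative assumption, not
-- part of the generated bindings; the same pattern applies to 'placeholderV2').
-- The pure placeholder is first rendered so it has a node that can be fed;
-- 'TF.render', 'TF.encodeTensorData', 'TF.feed' and 'TF.runWithFeeds' are
-- assumed to come from TensorFlow.Core in the main tensorflow package.
--
-- ```
-- import qualified Data.Vector as V
-- import qualified TensorFlow.Core as TF
-- import qualified TensorFlow.GenOps.Core as CoreOps
--
-- doubleFed :: IO (V.Vector Float)
-- doubleFed = TF.runSession $ do
--   -- Render the pure placeholder so it can be referred to in a feed.
--   p <- TF.render (CoreOps.placeholder :: TF.Tensor TF.Build Float)
--   let feedData = TF.encodeTensorData (TF.Shape [3]) (V.fromList [1, 2, 3])
--   -- Fetch p + p, supplying [1, 2, 3] for the placeholder.
--   TF.runWithFeeds [TF.feed p feedData] (CoreOps.add p p)
-- ```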
placeholderV2' op'options shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] return (opDef "PlaceholderV2" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- output_arg { name: "output" description: "A placeholder tensor that must be replaced using the feed mechanism." type_attr: "dtype" } attr { name: "dtype" type: "type" description: "The type of elements in the tensor." } attr { name: "shape" type: "shape" description: "The shape of the tensor. The shape can be any partially-specified\nshape. To be unconstrained, pass in a shape with unknown rank." } -} -- | A placeholder op that passes through `input` when its output is not fed. placeholderWithDefault :: forall v'1 dtype . (TensorType dtype) => Shape -- ^ __shape__: The (possibly partial) shape of the tensor. -> Tensor v'1 dtype -- ^ __input__: The default value to produce when `output` is not fed. -> Tensor Build dtype -- ^ __output__: A placeholder tensor that defaults to `input` if it is not fed. placeholderWithDefault = placeholderWithDefault' id placeholderWithDefault' :: forall v'1 dtype . (TensorType dtype) => OpParams -> Shape -- ^ __shape__: The (possibly partial) shape of the tensor. -> Tensor v'1 dtype -- ^ __input__: The default value to produce when `output` is not fed. -> Tensor Build dtype -- ^ __output__: A placeholder tensor that defaults to `input` if it is not fed. placeholderWithDefault' op'options shape input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "PlaceholderWithDefault" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "The default value to produce when `output` is not fed." type_attr: "dtype" } output_arg { name: "output" description: "A placeholder tensor that defaults to `input` if it is not fed." type_attr: "dtype" } attr { name: "dtype" type: "type" description: "The type of elements in the tensor." } attr { name: "shape" type: "shape" description: "The (possibly partial) shape of the tensor." } -} -- | Compute the polygamma function \\(\psi^{(n)}(x)\\). -- -- The polygamma function is defined as: -- -- ``` -- \psi^{(n)}(x) = \frac{d^n}{dx^n} \psi(x) -- ``` -- where \\(\psi(x)\\) is the digamma function. polygamma :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __x__ -> Tensor Build t -- ^ __z__ polygamma = polygamma' id polygamma' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __x__ -> Tensor Build t -- ^ __z__ polygamma' op'options a x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a, buildInputs x] return (opDef "Polygamma" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a" type_attr: "T" } input_arg { name: "x" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Computes the power of one value to another. -- -- Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for -- corresponding elements in `x` and `y`. 
For example: -- -- ``` -- # tensor 'x' is [[2, 2], [3, 3]] -- # tensor 'y' is [[8, 16], [2, 3]] -- tf.pow(x, y) ==> [[256, 65536], [9, 27]] -- ``` pow :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ pow = pow' id pow' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ pow' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "Pow" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | An identity op that triggers an error if a gradient is requested. -- -- When executed in a graph, this op outputs its input tensor as-is. -- -- When building ops to compute gradients, the TensorFlow gradient system -- will return an error when trying to look up the gradient of this op, -- because no gradient must ever be registered for this function. This -- op exists to prevent subtle bugs from silently returning unimplemented -- gradients in some corner cases. preventGradient :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __input__: any tensor. -> Tensor Build t -- ^ __output__: the same input tensor. preventGradient = preventGradient' id preventGradient' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__: any tensor. -> Tensor Build t -- ^ __output__: the same input tensor. preventGradient' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "PreventGradient" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "any tensor." type_attr: "T" } output_arg { name: "output" description: "the same input tensor." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "message" type: "string" default_value { s: "" } description: "Will be printed in the error when anyone tries to differentiate\nthis operation." } -} -- | Prints a list of tensors. -- -- Passes `input` through to `output` and prints `data` when evaluating. print :: forall v'1 v'2 t u m' . (MonadBuild m', TensorType t, TensorTypes u) => Tensor v'1 t -- ^ __input__: The tensor passed to `output` -> TensorList (v'2) u -- ^ __data__: A list of tensors to print out when op is evaluated. print = print' id print' :: forall v'1 v'2 t u m' . (MonadBuild m', TensorType t, TensorTypes u) => OpParams -> Tensor v'1 t -- ^ __input__: The tensor passed to `output` -> TensorList (v'2) u -- ^ __data__: A list of tensors to print out when op is evaluated.
-> m' (Tensor Value t) -- ^ __output__: = The unmodified `input` tensor print' op'options input data' | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs data'] buildOp [] (opDef "Print" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "U" .~ fromTensorTypes (Proxy :: Proxy u) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "The tensor passed to `output`" type_attr: "T" } input_arg { name: "data" description: "A list of tensors to print out when op is evaluated." type_list_attr: "U" } output_arg { name: "output" description: "= The unmodified `input` tensor" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "U" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "message" type: "string" default_value { s: "" } description: "A string, prefix of the error message." } attr { name: "first_n" type: "int" default_value { i: -1 } description: "Only log `first_n` number of times. -1 disables logging." } attr { name: "summarize" type: "int" default_value { i: 3 } description: "Only print this many entries of each tensor." } -} -- | A queue that produces elements sorted by the first component value. -- -- Note that the PriorityQueue requires the first component of any element -- to be a scalar int64, in addition to the other elements declared by -- component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue -- and DequeueMany) on a PriorityQueue will all require (resp. output) one extra -- entry in their input (resp. output) lists. priorityQueue :: forall m' . (MonadBuild m') => m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the queue. priorityQueue = priorityQueue' id priorityQueue' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the queue. priorityQueue' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "PriorityQueue" & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" description: "The handle to the queue." type: DT_STRING is_ref: true } attr { name: "component_types" type: "list(type)" default_value { list { } } description: "The type of each component in a value." has_minimum: true } attr { name: "shapes" type: "list(shape)" description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time." has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions." } -} -- | A queue that produces elements sorted by the first component value. -- -- Note that the PriorityQueue requires the first component of any element -- to be a scalar int64, in addition to the other elements declared by -- component_types. Therefore calls to Enqueue and EnqueueMany (resp. 
Dequeue -- and DequeueMany) on a PriorityQueue will all require (resp. output) one extra -- entry in their input (resp. output) lists. priorityQueueV2 :: forall m' . (MonadBuild m') => m' (Tensor Value ResourceHandle) -- ^ __handle__: The handle to the queue. priorityQueueV2 = priorityQueueV2' id priorityQueueV2' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Value ResourceHandle) -- ^ __handle__: The handle to the queue. priorityQueueV2' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "PriorityQueueV2" & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" description: "The handle to the queue." type: DT_RESOURCE } attr { name: "component_types" type: "list(type)" default_value { list { } } description: "The type of each component in a value." has_minimum: true } attr { name: "shapes" type: "list(shape)" description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time." has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions." } -} -- | Computes the product of elements across dimensions of a tensor. -- -- Reduces `input` along the dimensions given in `reduction_indices`. Unless -- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in -- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are -- retained with length 1. prod :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __input__: The tensor to reduce. -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce. -> Tensor Build t -- ^ __output__: The reduced tensor. prod = prod' id prod' :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __input__: The tensor to reduce. -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce. -> Tensor Build t -- ^ __output__: The reduced tensor. prod' op'options input reduction_indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs reduction_indices] return (opDef "Prod" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "The tensor to reduce." type_attr: "T" } input_arg { name: "reduction_indices" description: "The dimensions to reduce." 
type_attr: "Tidx" } output_arg { name: "output" description: "The reduced tensor." type_attr: "T" } attr { name: "keep_dims" type: "bool" default_value { b: false } description: "If true, retain reduced dimensions with length 1." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Computes the QR decompositions of one or more matrices. -- -- Computes the QR decomposition of each inner matrix in `tensor` such that -- `tensor[..., :, :] = q[..., :, :] * r[..., :,:])` -- -- ```prettyprint -- # a is a tensor. -- # q is a tensor of orthonormal matrices. -- # r is a tensor of upper triangular matrices. -- q, r = qr(a) -- q_full, r_full = qr(a, full_matrices=True) -- ``` qr :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Double, Float] t) => Tensor v'1 t -- ^ __input__: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions -- form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`. -> (Tensor Build t, Tensor Build t) -- ^ (__q__, __r__) -- -- * __q__: Orthonormal basis for range of `a`. If `full_matrices` is `False` then -- shape is `[..., M, P]`; if `full_matrices` is `True` then shape is -- `[..., M, M]`. -- -- * __r__: Triangular factor. If `full_matrices` is `False` then shape is -- `[..., P, N]`. If `full_matrices` is `True` then shape is `[..., M, N]`. qr = qr' id qr' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions -- form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`. -> (Tensor Build t, Tensor Build t) -- ^ (__q__, __r__) -- -- * __q__: Orthonormal basis for range of `a`. If `full_matrices` is `False` then -- shape is `[..., M, P]`; if `full_matrices` is `True` then shape is -- `[..., M, M]`. -- -- * __r__: Triangular factor. If `full_matrices` is `False` then shape is -- `[..., P, N]`. If `full_matrices` is `True` then shape is `[..., M, N]`. qr' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Qr" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "A tensor of shape `[..., M, N]` whose inner-most 2 dimensions\nform matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`." type_attr: "T" } output_arg { name: "q" description: "Orthonormal basis for range of `a`. If `full_matrices` is `False` then\nshape is `[..., M, P]`; if `full_matrices` is `True` then shape is\n`[..., M, M]`." type_attr: "T" } output_arg { name: "r" description: "Triangular factor. If `full_matrices` is `False` then shape is\n`[..., P, N]`. If `full_matrices` is `True` then shape is `[..., M, N]`." type_attr: "T" } attr { name: "full_matrices" type: "bool" default_value { b: false } description: "If true, compute full-sized `q` and `r`. If false\n(the default), compute only the leading `P` columns of `q`." 
} attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Use QuantizeAndDequantizeV2 instead. quantizeAndDequantize :: forall v'1 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ quantizeAndDequantize = quantizeAndDequantize' id quantizeAndDequantize' :: forall v'1 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ quantizeAndDequantize' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "QuantizeAndDequantize" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "signed_input" type: "bool" default_value { b: true } } attr { name: "num_bits" type: "int" default_value { i: 8 } } attr { name: "range_given" type: "bool" default_value { b: false } } attr { name: "input_min" type: "float" default_value { f: 0.0 } } attr { name: "input_max" type: "float" default_value { f: 0.0 } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Quantizes then dequantizes a tensor. -- -- This op simulates the precision loss from the quantized forward pass by: -- 1. Quantizing the tensor to fixed point numbers, which should match the target -- quantization method when it is used in inference. -- 2. Dequantizing it back to floating point numbers for the following ops, most -- likely matmul. -- -- There are different ways to quantize. This version does not use the full range -- of the output type, choosing to elide the lowest possible value for symmetry -- (e.g., output range is -127 to 127, not -128 to 127 for signed 8 bit -- quantization), so that 0.0 maps to 0. -- -- To perform this op, we first find the range of values in our tensor. The range -- we use is always centered on 0, so we find m such that -- -- 1. m = max(abs(input_min), abs(input_max)) if range_given is true, -- 2. m = max(abs(min_elem(input)), abs(max_elem(input))) otherwise. -- -- Our input tensor range is then [-m, m]. -- -- Next, we choose our fixed-point quantization buckets, [min_fixed, max_fixed]. -- If signed_input is true, this is -- -- [min_fixed, max_fixed ] = -- [-(1 << (num_bits - 1) - 1), (1 << (num_bits - 1)) - 1]. -- -- Otherwise, if signed_input is false, the fixed-point range is -- -- [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]. -- -- From this we compute our scaling factor, s: -- -- s = (max_fixed - min_fixed) / (2 * m). -- -- Now we can quantize and dequantize the elements of our tensor. An element e -- is transformed into e': -- -- e' = (e * s).round_to_nearest() / s. -- -- Note that we have a different number of buckets in the signed vs. unsigned -- cases. For example, if num_bits == 8, we get 254 buckets in the signed case -- vs. 255 in the unsigned case. -- -- For example, suppose num_bits = 8 and m = 1. Then -- -- [min_fixed, max_fixed] = [-127, 127], and -- s = (127 + 127) / 2 = 127. -- -- Given the vector {-1, -0.5, 0, 0.3}, this is quantized to -- {-127, -63, 0, 38}, and dequantized to {-1, -63.0/127, 0, 38.0/127}. quantizeAndDequantizeV2 :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __input__: Tensor to quantize and then dequantize. 
-> Tensor v'2 t -- ^ __input_min__: If range_given, this is the min of the range, otherwise this input -- will be ignored. -> Tensor v'3 t -- ^ __input_max__: If range_given, this is the max of the range, otherwise this input -- will be ignored. -> Tensor Build t -- ^ __output__ quantizeAndDequantizeV2 = quantizeAndDequantizeV2' id quantizeAndDequantizeV2' :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: Tensor to quantize and then dequantize. -> Tensor v'2 t -- ^ __input_min__: If range_given, this is the min of the range, otherwise this input -- will be ignored. -> Tensor v'3 t -- ^ __input_max__: If range_given, this is the max of the range, otherwise this input -- will be ignored. -> Tensor Build t -- ^ __output__ quantizeAndDequantizeV2' op'options input input_min input_max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs input_min, buildInputs input_max] return (opDef "QuantizeAndDequantizeV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "Tensor to quantize and then dequantize." type_attr: "T" } input_arg { name: "input_min" description: "If range_given, this is the min of the range, otherwise this input\nwill be ignored." type_attr: "T" } input_arg { name: "input_max" description: "If range_given, this is the max of the range, otherwise this input\nwill be ignored." type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "signed_input" type: "bool" default_value { b: true } description: "If the quantization is signed or unsigned." } attr { name: "num_bits" type: "int" default_value { i: 8 } description: "The bitwidth of the quantization." } attr { name: "range_given" type: "bool" default_value { b: false } description: "If the range is given or should be computed from the tensor." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Convert the quantized 'input' tensor into a lower-precision 'output', using the -- -- actual distribution of the values to maximize the usage of the lower bit depth -- and adjusting the output min and max ranges accordingly. -- -- [input_min, input_max] are scalar floats that specify the range for the float -- interpretation of the 'input' data. For example, if input_min is -1.0f and -- input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 -- value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. -- -- This operator tries to squeeze as much precision as possible into an output with -- a lower bit depth by calculating the actual min and max values found in the -- data. For example, maybe that quint16 input has no values lower than 16,384 and -- none higher than 49,152. That means only half the range is actually needed, all -- the float interpretations are between -0.5f and 0.5f, so if we want to compress -- the data into a quint8 output, we can use that range rather than the theoretical -- -1.0f to 1.0f that is suggested by the input min and max. -- -- In practice, this is most useful for taking output from operations like -- QuantizedMatMul that can produce higher bit-depth outputs than their inputs and -- may have large potential output ranges, but in practice have a distribution of -- input values that only uses a small fraction of the possible range. 
By feeding -- that output into this operator, we can reduce it from 32 bits down to 8 with -- minimal loss of accuracy. quantizeDownAndShrinkRange :: forall v'1 v'2 v'3 tinput out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 Float -- ^ __input_min__: The float value that the minimum quantized input value represents. -> Tensor v'3 Float -- ^ __input_max__: The float value that the maximum quantized input value represents. -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __output_min__, __output_max__) -- -- * __output__ -- -- * __output_min__: The float value that the minimum quantized output value represents. -- -- * __output_max__: The float value that the maximum quantized output value represents. quantizeDownAndShrinkRange = quantizeDownAndShrinkRange' id quantizeDownAndShrinkRange' :: forall v'1 v'2 v'3 tinput out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 Float -- ^ __input_min__: The float value that the minimum quantized input value represents. -> Tensor v'3 Float -- ^ __input_max__: The float value that the maximum quantized input value represents. -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __output_min__, __output_max__) -- -- * __output__ -- -- * __output_min__: The float value that the minimum quantized output value represents. -- -- * __output_max__: The float value that the maximum quantized output value represents. quantizeDownAndShrinkRange' op'options input input_min input_max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs input_min, buildInputs input_max] return (opDef "QuantizeDownAndShrinkRange" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "input_min" description: "The float value that the minimum quantized input value represents." type: DT_FLOAT } input_arg { name: "input_max" description: "The float value that the maximum quantized input value represents." type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "output_min" description: "The float value that the minimum quantized output value represents." type: DT_FLOAT } output_arg { name: "output_max" description: "The float value that the maximum quantized output value represents." type: DT_FLOAT } attr { name: "Tinput" type: "type" description: "The type of the input." allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "out_type" type: "type" description: "The type of the output. Should be a lower bit depth than Tinput." allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } -} -- | Quantize the 'input' tensor of type float to 'output' tensor of type 'T'. -- -- [min_range, max_range] are scalar floats that specify the range for -- the 'input' data. 
The 'mode' attribute controls exactly which calculations are -- used to convert the float values to their quantized equivalents. -- -- In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: -- -- ``` -- out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) -- if T == qint8, out[i] -= (range(T) + 1) / 2.0 -- ``` -- here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()` -- -- *MIN_COMBINED Mode Example* -- -- Assume the input is type float and has a possible range of [0.0, 6.0] and the -- output type is quint8 ([0, 255]). The min_range and max_range values should be -- specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each -- value of the input by 255/6 and cast to quint8. -- -- If the output type was qint8 ([-128, 127]), the operation will additionally -- subtract 128 from each value prior to casting, so that the range of values aligns -- with the range of qint8. -- -- If the mode is 'MIN_FIRST', then this approach is used: -- -- ``` -- number_of_steps = 1 << (# of bits in T) -- range_adjust = number_of_steps / (number_of_steps - 1) -- range = (range_max - range_min) * range_adjust -- range_scale = number_of_steps / range -- quantized = round(input * range_scale) - round(range_min * range_scale) + -- numeric_limits<T>::min() -- quantized = max(quantized, numeric_limits<T>::min()) -- quantized = min(quantized, numeric_limits<T>::max()) -- ``` -- -- The biggest difference between this and MIN_COMBINED is that the minimum range -- is rounded first, before it's subtracted from the rounded value. With -- MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing -- and dequantizing will introduce a larger and larger error. -- -- One thing to watch out for is that the operator may choose to adjust the -- requested minimum and maximum values slightly during the quantization process, -- so you should always use the output ports as the range for further calculations. -- For example, if the requested minimum and maximum values are close to equal, -- they will be separated by a small epsilon value to prevent ill-formed quantized -- buffers from being created. Otherwise, you can end up with buffers where all the -- quantized values map to the same float value, which causes problems for -- operations that have to perform further calculations on them. quantizeV2 :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t) => Tensor v'1 Float -- ^ __input__ -> Tensor v'2 Float -- ^ __min_range__: The minimum scalar value possibly produced for the input. -> Tensor v'3 Float -- ^ __max_range__: The maximum scalar value possibly produced for the input. -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __output_min__, __output_max__) -- -- * __output__: The quantized data produced from the float input. -- -- * __output_min__: The actual minimum scalar value used for the output. -- -- * __output_max__: The actual maximum scalar value used for the output. quantizeV2 = quantizeV2' id quantizeV2' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t) => OpParams -> Tensor v'1 Float -- ^ __input__ -> Tensor v'2 Float -- ^ __min_range__: The minimum scalar value possibly produced for the input. -> Tensor v'3 Float -- ^ __max_range__: The maximum scalar value possibly produced for the input.
-> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __output_min__, __output_max__) -- -- * __output__: The quantized data produced from the float input. -- -- * __output_min__: The actual minimum scalar value used for the output. -- -- * __output_max__: The actual maximum scalar value used for the output. quantizeV2' op'options input min_range max_range | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs min_range, buildInputs max_range] return (opDef "QuantizeV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_FLOAT } input_arg { name: "min_range" description: "The minimum scalar value possibly produced for the input." type: DT_FLOAT } input_arg { name: "max_range" description: "The maximum scalar value possibly produced for the input." type: DT_FLOAT } output_arg { name: "output" description: "The quantized data produced from the float input." type_attr: "T" } output_arg { name: "output_min" description: "The actual minimum scalar value used for the output." type: DT_FLOAT } output_arg { name: "output_max" description: "The actual maximum scalar value used for the output." type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "mode" type: "string" default_value { s: "MIN_COMBINED" } allowed_values { list { s: "MIN_COMBINED" s: "MIN_FIRST" } } } -} -- | Produces the average pool of the input tensor for quantized types. quantizedAvgPool :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t) => Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, height, width, channels]`. -> Tensor v'2 Float -- ^ __min_input__: The float value that the lowest quantized input value represents. -> Tensor v'3 Float -- ^ __max_input__: The float value that the highest quantized input value represents. -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__: The float value that the lowest quantized output value represents. -- -- * __max_output__: The float value that the highest quantized output value represents. quantizedAvgPool = quantizedAvgPool' id quantizedAvgPool' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t) => OpParams -> Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, height, width, channels]`. -> Tensor v'2 Float -- ^ __min_input__: The float value that the lowest quantized input value represents. -> Tensor v'3 Float -- ^ __max_input__: The float value that the highest quantized input value represents. -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__: The float value that the lowest quantized output value represents. -- -- * __max_output__: The float value that the highest quantized output value represents. 
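-- A pure-Haskell sketch of the symmetric quantize-then-dequantize scheme
-- documented for QuantizeAndDequantizeV2 above (a rendering of the formulas in
-- that description, not the actual kernel; round-half-up tie-breaking is
-- assumed for round_to_nearest):
--
-- ```
-- -- Quantize to numBits signed fixed point and back, over the range [-m, m].
-- quantizeDequantize :: Int -> [Float] -> [Float]
-- quantizeDequantize numBits xs = map roundTrip xs
--   where
--     m        = maximum (map abs xs)  -- assumes at least one non-zero input
--     maxFixed = fromIntegral ((2 :: Int) ^ (numBits - 1) - 1)
--     minFixed = negate maxFixed       -- lowest value elided for symmetry
--     s        = (maxFixed - minFixed) / (2 * m)
--     roundTrip e = fromIntegral (floor (e * s + 0.5) :: Int) / s
-- ```
--
-- With numBits = 8, `quantizeDequantize 8 [-1, -0.5, 0, 0.3]` reproduces the
-- worked example above: {-127, -63, 0, 38} in fixed point, which dequantizes
-- to {-1, -63/127, 0, 38/127}.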
quantizedAvgPool' op'options input min_input max_input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs min_input, buildInputs max_input] return (opDef "QuantizedAvgPool" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "4-D with shape `[batch, height, width, channels]`." type_attr: "T" } input_arg { name: "min_input" description: "The float value that the lowest quantized input value represents." type: DT_FLOAT } input_arg { name: "max_input" description: "The float value that the highest quantized input value represents." type: DT_FLOAT } output_arg { name: "output" type_attr: "T" } output_arg { name: "min_output" description: "The float value that the lowest quantized output value represents." type: DT_FLOAT } output_arg { name: "max_output" description: "The float value that the highest quantized output value represents." type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "ksize" type: "list(int)" description: "The size of the window for each dimension of the input tensor.\nThe length must be 4 to match the number of dimensions of the input." } attr { name: "strides" type: "list(int)" description: "The stride of the sliding window for each dimension of the input\ntensor. The length must be 4 to match the number of dimensions of the input." } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } -} -- | Quantized Batch normalization. -- -- This op is deprecated and will be removed in the future. Prefer -- `tf.nn.batch_normalization`. quantizedBatchNormWithGlobalNormalization :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 v'11 v'12 v'13 v'14 v'15 tinput out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Bool -- ^ __scale_after_normalization__: A bool indicating whether the resulted tensor -- needs to be multiplied with gamma. -> Float -- ^ __variance_epsilon__: A small float number to avoid dividing by 0. -> Tensor v'1 tinput -- ^ __t__: A 4D input Tensor. -> Tensor v'2 Float -- ^ __t_min__: The value represented by the lowest quantized input. -> Tensor v'3 Float -- ^ __t_max__: The value represented by the highest quantized input. -> Tensor v'4 tinput -- ^ __m__: A 1D mean Tensor with size matching the last dimension of t. -- This is the first output from tf.nn.moments, -- or a saved moving average thereof. -> Tensor v'5 Float -- ^ __m_min__: The value represented by the lowest quantized mean. -> Tensor v'6 Float -- ^ __m_max__: The value represented by the highest quantized mean. -> Tensor v'7 tinput -- ^ __v__: A 1D variance Tensor with size matching the last dimension of t. -- This is the second output from tf.nn.moments, -- or a saved moving average thereof. -> Tensor v'8 Float -- ^ __v_min__: The value represented by the lowest quantized variance. -> Tensor v'9 Float -- ^ __v_max__: The value represented by the highest quantized variance. -> Tensor v'10 tinput -- ^ __beta__: A 1D beta Tensor with size matching the last dimension of t. -- An offset to be added to the normalized tensor. -> Tensor v'11 Float -- ^ __beta_min__: The value represented by the lowest quantized offset. 
-> Tensor v'12 Float -- ^ __beta_max__: The value represented by the highest quantized offset. -> Tensor v'13 tinput -- ^ __gamma__: A 1D gamma Tensor with size matching the last dimension of t. -- If "scale_after_normalization" is true, this tensor will be multiplied -- with the normalized tensor. -> Tensor v'14 Float -- ^ __gamma_min__: The value represented by the lowest quantized gamma. -> Tensor v'15 Float -- ^ __gamma_max__: The value represented by the highest quantized gamma. -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__result__, __result_min__, __result_max__) -- -- * __result__ -- -- * __result_min__ -- -- * __result_max__ quantizedBatchNormWithGlobalNormalization = quantizedBatchNormWithGlobalNormalization' id quantizedBatchNormWithGlobalNormalization' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 v'11 v'12 v'13 v'14 v'15 tinput out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Bool -- ^ __scale_after_normalization__: A bool indicating whether the resulted tensor -- needs to be multiplied with gamma. -> Float -- ^ __variance_epsilon__: A small float number to avoid dividing by 0. -> Tensor v'1 tinput -- ^ __t__: A 4D input Tensor. -> Tensor v'2 Float -- ^ __t_min__: The value represented by the lowest quantized input. -> Tensor v'3 Float -- ^ __t_max__: The value represented by the highest quantized input. -> Tensor v'4 tinput -- ^ __m__: A 1D mean Tensor with size matching the last dimension of t. -- This is the first output from tf.nn.moments, -- or a saved moving average thereof. -> Tensor v'5 Float -- ^ __m_min__: The value represented by the lowest quantized mean. -> Tensor v'6 Float -- ^ __m_max__: The value represented by the highest quantized mean. -> Tensor v'7 tinput -- ^ __v__: A 1D variance Tensor with size matching the last dimension of t. -- This is the second output from tf.nn.moments, -- or a saved moving average thereof. -> Tensor v'8 Float -- ^ __v_min__: The value represented by the lowest quantized variance. -> Tensor v'9 Float -- ^ __v_max__: The value represented by the highest quantized variance. -> Tensor v'10 tinput -- ^ __beta__: A 1D beta Tensor with size matching the last dimension of t. -- An offset to be added to the normalized tensor. -> Tensor v'11 Float -- ^ __beta_min__: The value represented by the lowest quantized offset. -> Tensor v'12 Float -- ^ __beta_max__: The value represented by the highest quantized offset. -> Tensor v'13 tinput -- ^ __gamma__: A 1D gamma Tensor with size matching the last dimension of t. -- If "scale_after_normalization" is true, this tensor will be multiplied -- with the normalized tensor. -> Tensor v'14 Float -- ^ __gamma_min__: The value represented by the lowest quantized gamma. -> Tensor v'15 Float -- ^ __gamma_max__: The value represented by the highest quantized gamma. 
-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__result__, __result_min__, __result_max__) -- -- * __result__ -- -- * __result_min__ -- -- * __result_max__ quantizedBatchNormWithGlobalNormalization' op'options scale_after_normalization variance_epsilon t t_min t_max m m_min m_max v v_min v_max beta beta_min beta_max gamma gamma_min gamma_max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs t, buildInputs t_min, buildInputs t_max, buildInputs m, buildInputs m_min, buildInputs m_max, buildInputs v, buildInputs v_min, buildInputs v_max, buildInputs beta, buildInputs beta_min, buildInputs beta_max, buildInputs gamma, buildInputs gamma_min, buildInputs gamma_max] return (opDef "QuantizedBatchNormWithGlobalNormalization" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "out_type" .~ tensorType (undefined :: out_type) & opAttr "scale_after_normalization" .~ scale_after_normalization & opAttr "variance_epsilon" .~ variance_epsilon & op'options & opInputs .~ op'inputs) {- input_arg { name: "t" description: "A 4D input Tensor." type_attr: "Tinput" } input_arg { name: "t_min" description: "The value represented by the lowest quantized input." type: DT_FLOAT } input_arg { name: "t_max" description: "The value represented by the highest quantized input." type: DT_FLOAT } input_arg { name: "m" description: "A 1D mean Tensor with size matching the last dimension of t.\nThis is the first output from tf.nn.moments,\nor a saved moving average thereof." type_attr: "Tinput" } input_arg { name: "m_min" description: "The value represented by the lowest quantized mean." type: DT_FLOAT } input_arg { name: "m_max" description: "The value represented by the highest quantized mean." type: DT_FLOAT } input_arg { name: "v" description: "A 1D variance Tensor with size matching the last dimension of t.\nThis is the second output from tf.nn.moments,\nor a saved moving average thereof." type_attr: "Tinput" } input_arg { name: "v_min" description: "The value represented by the lowest quantized variance." type: DT_FLOAT } input_arg { name: "v_max" description: "The value represented by the highest quantized variance." type: DT_FLOAT } input_arg { name: "beta" description: "A 1D beta Tensor with size matching the last dimension of t.\nAn offset to be added to the normalized tensor." type_attr: "Tinput" } input_arg { name: "beta_min" description: "The value represented by the lowest quantized offset." type: DT_FLOAT } input_arg { name: "beta_max" description: "The value represented by the highest quantized offset." type: DT_FLOAT } input_arg { name: "gamma" description: "A 1D gamma Tensor with size matching the last dimension of t.\nIf \"scale_after_normalization\" is true, this tensor will be multiplied\nwith the normalized tensor." type_attr: "Tinput" } input_arg { name: "gamma_min" description: "The value represented by the lowest quantized gamma." type: DT_FLOAT } input_arg { name: "gamma_max" description: "The value represented by the highest quantized gamma." 
type: DT_FLOAT } output_arg { name: "result" type_attr: "out_type" } output_arg { name: "result_min" type: DT_FLOAT } output_arg { name: "result_max" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "out_type" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "variance_epsilon" type: "float" description: "A small float number to avoid dividing by 0." } attr { name: "scale_after_normalization" type: "bool" description: "A bool indicating whether the resulted tensor\nneeds to be multiplied with gamma." } -} -- | Adds Tensor 'bias' to Tensor 'input' for Quantized types. -- -- Broadcasts the values of bias on dimensions 0..N-2 of 'input'. quantizedBiasAdd :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2 out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t1, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t2, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 t1 -- ^ __input__ -> Tensor v'2 t2 -- ^ __bias__: A 1D bias Tensor with size matching the last dimension of 'input'. -> Tensor v'3 Float -- ^ __min_input__: The float value that the lowest quantized input value represents. -> Tensor v'4 Float -- ^ __max_input__: The float value that the highest quantized input value represents. -> Tensor v'5 Float -- ^ __min_bias__: The float value that the lowest quantized bias value represents. -> Tensor v'6 Float -- ^ __max_bias__: The float value that the highest quantized bias value represents. -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_out__, __max_out__) -- -- * __output__ -- -- * __min_out__: The float value that the lowest quantized output value represents. -- -- * __max_out__: The float value that the highest quantized output value represents. quantizedBiasAdd = quantizedBiasAdd' id quantizedBiasAdd' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2 out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t1, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t2, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 t1 -- ^ __input__ -> Tensor v'2 t2 -- ^ __bias__: A 1D bias Tensor with size matching the last dimension of 'input'. -> Tensor v'3 Float -- ^ __min_input__: The float value that the lowest quantized input value represents. -> Tensor v'4 Float -- ^ __max_input__: The float value that the highest quantized input value represents. -> Tensor v'5 Float -- ^ __min_bias__: The float value that the lowest quantized bias value represents. -> Tensor v'6 Float -- ^ __max_bias__: The float value that the highest quantized bias value represents. -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_out__, __max_out__) -- -- * __output__ -- -- * __min_out__: The float value that the lowest quantized output value represents. -- -- * __max_out__: The float value that the highest quantized output value represents. 
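-- The quantized output width is chosen through the @out_type@ type variable,
-- so a result annotation selects it. A minimal sketch (illustrative only,
-- assuming 'TensorFlow.Ops' is imported qualified as @TF@; per the
-- constraints above, 'Data.Word.Word8' stands in for quint8 and
-- 'Data.Int.Int32' for qint32):
--
-- ```
-- let input = TF.constant (Shape [2, 3]) ([1 .. 6] :: [Data.Word.Word8])
--     bias  = TF.constant (Shape [3]) ([10, 20, 30] :: [Data.Word.Word8])
--     lo    = TF.constant (Shape []) [0 :: Float]
--     maxIn = TF.constant (Shape []) [6 :: Float]
--     maxB  = TF.constant (Shape []) [3 :: Float]
--     (out, minOut, maxOut) =
--       quantizedBiasAdd input bias lo maxIn lo maxB
--         :: (Tensor Build Data.Int.Int32, Tensor Build Float, Tensor Build Float)
-- ```
--
-- Picking the 32-bit quantized output leaves headroom for the sums; the
-- returned @min_out@/@max_out@ then describe the float range of that wider
-- buffer.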
quantizedBiasAdd' op'options input bias min_input max_input min_bias max_bias | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs bias, buildInputs min_input, buildInputs max_input, buildInputs min_bias, buildInputs max_bias] return (opDef "QuantizedBiasAdd" & opAttr "T1" .~ tensorType (undefined :: t1) & opAttr "T2" .~ tensorType (undefined :: t2) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T1" } input_arg { name: "bias" description: "A 1D bias Tensor with size matching the last dimension of \'input\'." type_attr: "T2" } input_arg { name: "min_input" description: "The float value that the lowest quantized input value represents." type: DT_FLOAT } input_arg { name: "max_input" description: "The float value that the highest quantized input value represents." type: DT_FLOAT } input_arg { name: "min_bias" description: "The float value that the lowest quantized bias value represents." type: DT_FLOAT } input_arg { name: "max_bias" description: "The float value that the highest quantized bias value represents." type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "min_out" description: "The float value that the lowest quantized output value represents." type: DT_FLOAT } output_arg { name: "max_out" description: "The float value that the highest quantized output value represents." type: DT_FLOAT } attr { name: "T1" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "T2" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "out_type" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } -} -- | Concatenates quantized tensors along one dimension. quantizedConcat :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => Tensor v'1 Data.Int.Int32 -- ^ __concat_dim__: 0-D. The dimension along which to concatenate. Must be in the -- range [0, rank(values)). -> [Tensor v'2 t] -- ^ __values__: The `N` Tensors to concatenate. Their ranks and types must match, -- and their sizes must match in all dimensions except `concat_dim`. -> [Tensor v'3 Float] -- ^ __input_mins__: The minimum scalar values for each of the input tensors. -> [Tensor v'4 Float] -- ^ __input_maxes__: The maximum scalar values for each of the input tensors. -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __output_min__, __output_max__) -- -- * __output__: A `Tensor` with the concatenation of values stacked along the -- `concat_dim` dimension. This tensor's shape matches that of `values` except -- in `concat_dim` where it has the sum of the sizes. -- -- * __output_min__: The float value that the minimum quantized output value represents. -- -- * __output_max__: The float value that the maximum quantized output value represents. quantizedConcat = quantizedConcat' id quantizedConcat' :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __concat_dim__: 0-D. The dimension along which to concatenate. Must be in the -- range [0, rank(values)). -> [Tensor v'2 t] -- ^ __values__: The `N` Tensors to concatenate. Their ranks and types must match, -- and their sizes must match in all dimensions except `concat_dim`. 
-> [Tensor v'3 Float] -- ^ __input_mins__: The minimum scalar values for each of the input tensors. -> [Tensor v'4 Float] -- ^ __input_maxes__: The maximum scalar values for each of the input tensors. -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __output_min__, __output_max__) -- -- * __output__: A `Tensor` with the concatenation of values stacked along the -- `concat_dim` dimension. This tensor's shape matches that of `values` except -- in `concat_dim` where it has the sum of the sizes. -- -- * __output_min__: The float value that the minimum quantized output value represents. -- -- * __output_max__: The float value that the maximum quantized output value represents. quantizedConcat' op'options concat_dim values input_mins input_maxes | eqLengthGuard [("N", [("values", length values), ("input_mins", length input_mins), ("input_maxes", length input_maxes)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs concat_dim, buildInputs values, buildInputs input_mins, buildInputs input_maxes] return (opDef "QuantizedConcat" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length values) :: Int64 {- input_arg { name: "concat_dim" description: "0-D. The dimension along which to concatenate. Must be in the\nrange [0, rank(values))." type: DT_INT32 } input_arg { name: "values" description: "The `N` Tensors to concatenate. Their ranks and types must match,\nand their sizes must match in all dimensions except `concat_dim`." type_attr: "T" number_attr: "N" } input_arg { name: "input_mins" description: "The minimum scalar values for each of the input tensors." type: DT_FLOAT number_attr: "N" } input_arg { name: "input_maxes" description: "The maximum scalar values for each of the input tensors." type: DT_FLOAT number_attr: "N" } output_arg { name: "output" description: "A `Tensor` with the concatenation of values stacked along the\n`concat_dim` dimension. This tensor\'s shape matches that of `values` except\nin `concat_dim` where it has the sum of the sizes." type_attr: "T" } output_arg { name: "output_min" description: "The float value that the minimum quantized output value represents." type: DT_FLOAT } output_arg { name: "output_max" description: "The float value that the maximum quantized output value represents." type: DT_FLOAT } attr { name: "N" type: "int" has_minimum: true minimum: 2 } attr { name: "T" type: "type" } -} -- | Computes a 2D convolution given quantized 4D input and filter tensors. -- -- The inputs are quantized tensors where the lowest value represents the real -- number of the associated minimum, and the highest represents the maximum. -- This means that you can only interpret the quantized output in the same way, by -- taking the returned minimum and maximum values into account. quantizedConv2D :: forall v'1 v'2 v'3 v'4 v'5 v'6 tinput tfilter out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__: filter's input_depth dimension must match input's depth dimensions. -> Tensor v'3 Float -- ^ __min_input__: The float value that the lowest quantized input value represents. 
-> Tensor v'4 Float -- ^ __max_input__: The float value that the highest quantized input value represents. -> Tensor v'5 Float -- ^ __min_filter__: The float value that the lowest quantized filter value represents. -> Tensor v'6 Float -- ^ __max_filter__: The float value that the highest quantized filter value represents. -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__: The float value that the lowest quantized output value represents. -- -- * __max_output__: The float value that the highest quantized output value represents. quantizedConv2D = quantizedConv2D' id quantizedConv2D' :: forall v'1 v'2 v'3 v'4 v'5 v'6 tinput tfilter out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__: filter's input_depth dimension must match input's depth dimensions. -> Tensor v'3 Float -- ^ __min_input__: The float value that the lowest quantized input value represents. -> Tensor v'4 Float -- ^ __max_input__: The float value that the highest quantized input value represents. -> Tensor v'5 Float -- ^ __min_filter__: The float value that the lowest quantized filter value represents. -> Tensor v'6 Float -- ^ __max_filter__: The float value that the highest quantized filter value represents. -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__: The float value that the lowest quantized output value represents. -- -- * __max_output__: The float value that the highest quantized output value represents. quantizedConv2D' op'options input filter min_input max_input min_filter max_filter | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs min_input, buildInputs max_input, buildInputs min_filter, buildInputs max_filter] return (opDef "QuantizedConv2D" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "Tfilter" .~ tensorType (undefined :: tfilter) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "filter" description: "filter\'s input_depth dimension must match input\'s depth dimensions." type_attr: "Tfilter" } input_arg { name: "min_input" description: "The float value that the lowest quantized input value represents." type: DT_FLOAT } input_arg { name: "max_input" description: "The float value that the highest quantized input value represents." type: DT_FLOAT } input_arg { name: "min_filter" description: "The float value that the lowest quantized filter value represents." type: DT_FLOAT } input_arg { name: "max_filter" description: "The float value that the highest quantized filter value represents." type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "min_output" description: "The float value that the lowest quantized output value represents." type: DT_FLOAT } output_arg { name: "max_output" description: "The float value that the highest quantized output value represents." 
type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "Tfilter" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "out_type" type: "type" default_value { type: DT_QINT32 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "strides" type: "list(int)" description: "The stride of the sliding window for each dimension of the input\ntensor." } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } -} -- | Quantized Instance normalization. quantizedInstanceNorm :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t) => Tensor v'1 t -- ^ __x__: A 4D input Tensor. -> Tensor v'2 Float -- ^ __x_min__: The value represented by the lowest quantized input. -> Tensor v'3 Float -- ^ __x_max__: The value represented by the highest quantized input. -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__y__, __y_min__, __y_max__) -- -- * __y__: A 4D Tensor. -- -- * __y_min__: The value represented by the lowest quantized output. -- -- * __y_max__: The value represented by the highest quantized output. quantizedInstanceNorm = quantizedInstanceNorm' id quantizedInstanceNorm' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t) => OpParams -> Tensor v'1 t -- ^ __x__: A 4D input Tensor. -> Tensor v'2 Float -- ^ __x_min__: The value represented by the lowest quantized input. -> Tensor v'3 Float -- ^ __x_max__: The value represented by the highest quantized input. -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__y__, __y_min__, __y_max__) -- -- * __y__: A 4D Tensor. -- -- * __y_min__: The value represented by the lowest quantized output. -- -- * __y_max__: The value represented by the highest quantized output. quantizedInstanceNorm' op'options x x_min x_max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs x_min, buildInputs x_max] return (opDef "QuantizedInstanceNorm" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" description: "A 4D input Tensor." type_attr: "T" } input_arg { name: "x_min" description: "The value represented by the lowest quantized input." type: DT_FLOAT } input_arg { name: "x_max" description: "The value represented by the highest quantized input." type: DT_FLOAT } output_arg { name: "y" description: "A 4D Tensor." type_attr: "T" } output_arg { name: "y_min" description: "The value represented by the lowest quantized output." type: DT_FLOAT } output_arg { name: "y_max" description: "The value represented by the highest quantized output." type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "output_range_given" type: "bool" default_value { b: false } description: "If True, `given_y_min` and `given_y_min`\nand `given_y_max` are used as the output range. Otherwise,\nthe implementation computes the output range." } attr { name: "given_y_min" type: "float" default_value { f: 0.0 } description: "Output in `y_min` if `output_range_given` is True." 
} attr { name: "given_y_max" type: "float" default_value { f: 0.0 } description: "Output in `y_max` if `output_range_given` is True." } attr { name: "variance_epsilon" type: "float" default_value { f: 1.0e-5 } description: "A small float number to avoid dividing by 0." } attr { name: "min_separation" type: "float" default_value { f: 1.0e-3 } description: "Minimum value of `y_max - y_min`" } -} -- | Perform a quantized matrix multiplication of `a` by the matrix `b`. -- -- The inputs must be two-dimensional matrices and the inner dimension of -- `a` (after being transposed if `transpose_a` is non-zero) must match the -- outer dimension of `b` (after being transposed if `transpose_b` is -- non-zero). quantizedMatMul :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2 toutput . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t1, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t2, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] toutput) => Tensor v'1 t1 -- ^ __a__: Must be a two-dimensional tensor. -> Tensor v'2 t2 -- ^ __b__: Must be a two-dimensional tensor. -> Tensor v'3 Float -- ^ __min_a__: The float value that the lowest quantized `a` value represents. -> Tensor v'4 Float -- ^ __max_a__: The float value that the highest quantized `a` value represents. -> Tensor v'5 Float -- ^ __min_b__: The float value that the lowest quantized `b` value represents. -> Tensor v'6 Float -- ^ __max_b__: The float value that the highest quantized `b` value represents. -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float) -- ^ (__out__, __min_out__, __max_out__) -- -- * __out__ -- -- * __min_out__: The float value that the lowest quantized output value represents. -- -- * __max_out__: The float value that the highest quantized output value represents. quantizedMatMul = quantizedMatMul' id quantizedMatMul' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2 toutput . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t1, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t2, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] toutput) => OpParams -> Tensor v'1 t1 -- ^ __a__: Must be a two-dimensional tensor. -> Tensor v'2 t2 -- ^ __b__: Must be a two-dimensional tensor. -> Tensor v'3 Float -- ^ __min_a__: The float value that the lowest quantized `a` value represents. -> Tensor v'4 Float -- ^ __max_a__: The float value that the highest quantized `a` value represents. -> Tensor v'5 Float -- ^ __min_b__: The float value that the lowest quantized `b` value represents. -> Tensor v'6 Float -- ^ __max_b__: The float value that the highest quantized `b` value represents. -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float) -- ^ (__out__, __min_out__, __max_out__) -- -- * __out__ -- -- * __min_out__: The float value that the lowest quantized output value represents. -- -- * __max_out__: The float value that the highest quantized output value represents.
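-- The @transpose_a@ and @transpose_b@ attributes default to false; either can
-- be overridden through the 'OpParams' argument. A minimal sketch
-- (illustrative only, assuming 'TensorFlow.Ops' is imported qualified as
-- @TF@; per the constraints above, 'Data.Word.Word8' stands in for quint8
-- and 'Data.Int.Int32' for qint32):
--
-- ```
-- let a   = TF.constant (Shape [2, 3]) ([1 .. 6] :: [Data.Word.Word8])
--     b   = TF.constant (Shape [4, 3]) ([1 .. 12] :: [Data.Word.Word8])
--     lo  = TF.constant (Shape []) [0 :: Float]
--     hiA = TF.constant (Shape []) [10 :: Float]
--     hiB = TF.constant (Shape []) [12 :: Float]
--     -- b is stored as 4x3, so it must be transposed to line up with a's
--     -- inner dimension of 3.
--     (out, minOut, maxOut) =
--       quantizedMatMul' (opAttr "transpose_b" .~ True) a b lo hiA lo hiB
--         :: (Tensor Build Data.Int.Int32, Tensor Build Float, Tensor Build Float)
-- ```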
quantizedMatMul' op'options a b min_a max_a min_b max_b | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a, buildInputs b, buildInputs min_a, buildInputs max_a, buildInputs min_b, buildInputs max_b] return (opDef "QuantizedMatMul" & opAttr "T1" .~ tensorType (undefined :: t1) & opAttr "T2" .~ tensorType (undefined :: t2) & opAttr "Toutput" .~ tensorType (undefined :: toutput) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a" description: "Must be a two-dimensional tensor." type_attr: "T1" } input_arg { name: "b" description: "Must be a two-dimensional tensor." type_attr: "T2" } input_arg { name: "min_a" description: "The float value that the lowest quantized `a` value represents." type: DT_FLOAT } input_arg { name: "max_a" description: "The float value that the highest quantized `a` value represents." type: DT_FLOAT } input_arg { name: "min_b" description: "The float value that the lowest quantized `b` value represents." type: DT_FLOAT } input_arg { name: "max_b" description: "The float value that the highest quantized `b` value represents." type: DT_FLOAT } output_arg { name: "out" type_attr: "Toutput" } output_arg { name: "min_out" description: "The float value that the lowest quantized output value represents." type: DT_FLOAT } output_arg { name: "max_out" description: "The float value that the highest quantized output value represents." type: DT_FLOAT } attr { name: "T1" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "T2" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "Toutput" type: "type" default_value { type: DT_QINT32 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "transpose_a" type: "bool" default_value { b: false } description: "If true, `a` is transposed before multiplication." } attr { name: "transpose_b" type: "bool" default_value { b: false } description: "If true, `b` is transposed before multiplication." } attr { name: "Tactivation" type: "type" default_value { type: DT_QUINT8 } description: "The type of output produced by activation function\nfollowing this operation." allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } -} -- | Produces the max pool of the input tensor for quantized types. quantizedMaxPool :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t) => Tensor v'1 t -- ^ __input__: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over. -> Tensor v'2 Float -- ^ __min_input__: The float value that the lowest quantized input value represents. -> Tensor v'3 Float -- ^ __max_input__: The float value that the highest quantized input value represents. -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__: The float value that the lowest quantized output value represents. -- -- * __max_output__: The float value that the highest quantized output value represents. quantizedMaxPool = quantizedMaxPool' id quantizedMaxPool' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t) => OpParams -> Tensor v'1 t -- ^ __input__: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over. 
-> Tensor v'2 Float -- ^ __min_input__: The float value that the lowest quantized input value represents. -> Tensor v'3 Float -- ^ __max_input__: The float value that the highest quantized input value represents. -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__: The float value that the lowest quantized output value represents. -- -- * __max_output__: The float value that the highest quantized output value represents. quantizedMaxPool' op'options input min_input max_input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs min_input, buildInputs max_input] return (opDef "QuantizedMaxPool" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "The 4D (batch x rows x cols x depth) Tensor to MaxReduce over." type_attr: "T" } input_arg { name: "min_input" description: "The float value that the lowest quantized input value represents." type: DT_FLOAT } input_arg { name: "max_input" description: "The float value that the highest quantized input value represents." type: DT_FLOAT } output_arg { name: "output" type_attr: "T" } output_arg { name: "min_output" description: "The float value that the lowest quantized output value represents." type: DT_FLOAT } output_arg { name: "max_output" description: "The float value that the highest quantized output value represents." type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "ksize" type: "list(int)" description: "The size of the window for each dimension of the input tensor.\nThe length must be 4 to match the number of dimensions of the input." } attr { name: "strides" type: "list(int)" description: "The stride of the sliding window for each dimension of the input\ntensor. The length must be 4 to match the number of dimensions of the input." } attr { name: "padding" type: "string" description: "The type of padding algorithm to use." allowed_values { list { s: "SAME" s: "VALID" } } } -} -- | Returns x * y element-wise, working on quantized buffers. quantizedMul :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2 toutput . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t1, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t2, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] toutput) => Tensor v'1 t1 -- ^ __x__ -> Tensor v'2 t2 -- ^ __y__ -> Tensor v'3 Float -- ^ __min_x__: The float value that the lowest quantized `x` value represents. -> Tensor v'4 Float -- ^ __max_x__: The float value that the highest quantized `x` value represents. -> Tensor v'5 Float -- ^ __min_y__: The float value that the lowest quantized `y` value represents. -> Tensor v'6 Float -- ^ __max_y__: The float value that the highest quantized `y` value represents. -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float) -- ^ (__z__, __min_z__, __max_z__) -- -- * __z__ -- -- * __min_z__: The float value that the lowest quantized output value represents. -- -- * __max_z__: The float value that the highest quantized output value represents. -- -- *NOTE*: `QuantizedMul` supports limited forms of broadcasting. 
More about -- broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) quantizedMul = quantizedMul' id quantizedMul' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2 toutput . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t1, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t2, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] toutput) => OpParams -> Tensor v'1 t1 -- ^ __x__ -> Tensor v'2 t2 -- ^ __y__ -> Tensor v'3 Float -- ^ __min_x__: The float value that the lowest quantized `x` value represents. -> Tensor v'4 Float -- ^ __max_x__: The float value that the highest quantized `x` value represents. -> Tensor v'5 Float -- ^ __min_y__: The float value that the lowest quantized `y` value represents. -> Tensor v'6 Float -- ^ __max_y__: The float value that the highest quantized `y` value represents. -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float) -- ^ (__z__, __min_z__, __max_z__) -- -- * __z__ -- -- * __min_z__: The float value that the lowest quantized output value represents. -- -- * __max_z__: The float value that the highest quantized output value represents. -- -- *NOTE*: `QuantizedMul` supports limited forms of broadcasting. More about -- broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) quantizedMul' op'options x y min_x max_x min_y max_y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y, buildInputs min_x, buildInputs max_x, buildInputs min_y, buildInputs max_y] return (opDef "QuantizedMul" & opAttr "T1" .~ tensorType (undefined :: t1) & opAttr "T2" .~ tensorType (undefined :: t2) & opAttr "Toutput" .~ tensorType (undefined :: toutput) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T1" } input_arg { name: "y" type_attr: "T2" } input_arg { name: "min_x" description: "The float value that the lowest quantized `x` value represents." type: DT_FLOAT } input_arg { name: "max_x" description: "The float value that the highest quantized `x` value represents." type: DT_FLOAT } input_arg { name: "min_y" description: "The float value that the lowest quantized `y` value represents." type: DT_FLOAT } input_arg { name: "max_y" description: "The float value that the highest quantized `y` value represents." type: DT_FLOAT } output_arg { name: "z" type_attr: "Toutput" } output_arg { name: "min_z" description: "The float value that the lowest quantized output value represents." type: DT_FLOAT } output_arg { name: "max_z" description: "The float value that the highest quantized output value represents.\n\n*NOTE*: `QuantizedMul` supports limited forms of broadcasting. More about\nbroadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)" type: DT_FLOAT } attr { name: "T1" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "T2" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "Toutput" type: "type" default_value { type: DT_QINT32 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } -} -- | Computes Quantized Rectified Linear: `max(features, 0)` quantizedRelu :: forall v'1 v'2 v'3 tinput out_type . 
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __features__ -> Tensor v'2 Float -- ^ __min_features__: The float value that the lowest quantized value represents. -> Tensor v'3 Float -- ^ __max_features__: The float value that the highest quantized value represents. -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__activations__, __min_activations__, __max_activations__) -- -- * __activations__: Has the same output shape as "features". -- -- * __min_activations__: The float value that the lowest quantized value represents. -- -- * __max_activations__: The float value that the highest quantized value represents. quantizedRelu = quantizedRelu' id quantizedRelu' :: forall v'1 v'2 v'3 tinput out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __features__ -> Tensor v'2 Float -- ^ __min_features__: The float value that the lowest quantized value represents. -> Tensor v'3 Float -- ^ __max_features__: The float value that the highest quantized value represents. -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__activations__, __min_activations__, __max_activations__) -- -- * __activations__: Has the same output shape as "features". -- -- * __min_activations__: The float value that the lowest quantized value represents. -- -- * __max_activations__: The float value that the highest quantized value represents. quantizedRelu' op'options features min_features max_features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features, buildInputs min_features, buildInputs max_features] return (opDef "QuantizedRelu" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "features" type_attr: "Tinput" } input_arg { name: "min_features" description: "The float value that the lowest quantized value represents." type: DT_FLOAT } input_arg { name: "max_features" description: "The float value that the highest quantized value represents." type: DT_FLOAT } output_arg { name: "activations" description: "Has the same output shape as \"features\"." type_attr: "out_type" } output_arg { name: "min_activations" description: "The float value that the lowest quantized value represents." type: DT_FLOAT } output_arg { name: "max_activations" description: "The float value that the highest quantized value represents." type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "out_type" type: "type" default_value { type: DT_QUINT8 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } -} -- | Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` quantizedRelu6 :: forall v'1 v'2 v'3 tinput out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __features__ -> Tensor v'2 Float -- ^ __min_features__: The float value that the lowest quantized value represents. 
-> Tensor v'3 Float -- ^ __max_features__: The float value that the highest quantized value represents. -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__activations__, __min_activations__, __max_activations__) -- -- * __activations__: Has the same output shape as "features". -- -- * __min_activations__: The float value that the lowest quantized value represents. -- -- * __max_activations__: The float value that the highest quantized value represents. quantizedRelu6 = quantizedRelu6' id quantizedRelu6' :: forall v'1 v'2 v'3 tinput out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __features__ -> Tensor v'2 Float -- ^ __min_features__: The float value that the lowest quantized value represents. -> Tensor v'3 Float -- ^ __max_features__: The float value that the highest quantized value represents. -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__activations__, __min_activations__, __max_activations__) -- -- * __activations__: Has the same output shape as "features". -- -- * __min_activations__: The float value that the lowest quantized value represents. -- -- * __max_activations__: The float value that the highest quantized value represents. quantizedRelu6' op'options features min_features max_features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features, buildInputs min_features, buildInputs max_features] return (opDef "QuantizedRelu6" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "features" type_attr: "Tinput" } input_arg { name: "min_features" description: "The float value that the lowest quantized value represents." type: DT_FLOAT } input_arg { name: "max_features" description: "The float value that the highest quantized value represents." type: DT_FLOAT } output_arg { name: "activations" description: "Has the same output shape as \"features\"." type_attr: "out_type" } output_arg { name: "min_activations" description: "The float value that the lowest quantized value represents." type: DT_FLOAT } output_arg { name: "max_activations" description: "The float value that the highest quantized value represents." type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "out_type" type: "type" default_value { type: DT_QUINT8 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } -} -- | Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)` quantizedReluX :: forall v'1 v'2 v'3 v'4 tinput out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __features__ -> Tensor v'2 Float -- ^ __max_value__ -> Tensor v'3 Float -- ^ __min_features__: The float value that the lowest quantized value represents. -> Tensor v'4 Float -- ^ __max_features__: The float value that the highest quantized value represents. 
-> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__activations__, __min_activations__, __max_activations__) -- -- * __activations__: Has the same output shape as "features". -- -- * __min_activations__: The float value that the lowest quantized value represents. -- -- * __max_activations__: The float value that the highest quantized value represents. quantizedReluX = quantizedReluX' id quantizedReluX' :: forall v'1 v'2 v'3 v'4 tinput out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __features__ -> Tensor v'2 Float -- ^ __max_value__ -> Tensor v'3 Float -- ^ __min_features__: The float value that the lowest quantized value represents. -> Tensor v'4 Float -- ^ __max_features__: The float value that the highest quantized value represents. -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__activations__, __min_activations__, __max_activations__) -- -- * __activations__: Has the same output shape as "features". -- -- * __min_activations__: The float value that the lowest quantized value represents. -- -- * __max_activations__: The float value that the highest quantized value represents. quantizedReluX' op'options features max_value min_features max_features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features, buildInputs max_value, buildInputs min_features, buildInputs max_features] return (opDef "QuantizedReluX" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "features" type_attr: "Tinput" } input_arg { name: "max_value" type: DT_FLOAT } input_arg { name: "min_features" description: "The float value that the lowest quantized value represents." type: DT_FLOAT } input_arg { name: "max_features" description: "The float value that the highest quantized value represents." type: DT_FLOAT } output_arg { name: "activations" description: "Has the same output shape as \"features\"." type_attr: "out_type" } output_arg { name: "min_activations" description: "The float value that the lowest quantized value represents." type: DT_FLOAT } output_arg { name: "max_activations" description: "The float value that the highest quantized value represents." type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "out_type" type: "type" default_value { type: DT_QUINT8 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } -} -- | Reshapes a quantized tensor as per the Reshape op. -- -- ``` quantizedReshape :: forall v'1 v'2 v'3 v'4 t tshape . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tshape) => Tensor v'1 t -- ^ __tensor__ -> Tensor v'2 tshape -- ^ __shape__: Defines the shape of the output tensor. -> Tensor v'3 Float -- ^ __input_min__: The minimum value of the input. -> Tensor v'4 Float -- ^ __input_max__: The maximum value of the input. -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __output_min__, __output_max__) -- -- * __output__ -- -- * __output_min__: This value is copied from input_min. -- -- * __output_max__: This value is copied from input_max. 
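-- Reshaping never touches the quantized bytes, so the range is simply copied
-- through: @output_min@ and @output_max@ are @input_min@ and @input_max@
-- unchanged. A minimal sketch (illustrative only, assuming 'TensorFlow.Ops'
-- is imported qualified as @TF@; 'Data.Word.Word8' stands in for quint8):
--
-- ```
-- let t        = TF.constant (Shape [2, 3]) ([1 .. 6] :: [Data.Word.Word8])
--     newShape = TF.constant (Shape [2]) ([3, 2] :: [Data.Int.Int32])
--     inMin    = TF.constant (Shape []) [0 :: Float]
--     inMax    = TF.constant (Shape []) [6 :: Float]
--     (out, outMin, outMax) = quantizedReshape t newShape inMin inMax
-- ```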
quantizedReshape = quantizedReshape' id quantizedReshape' :: forall v'1 v'2 v'3 v'4 t tshape . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tshape) => OpParams -> Tensor v'1 t -- ^ __tensor__ -> Tensor v'2 tshape -- ^ __shape__: Defines the shape of the output tensor. -> Tensor v'3 Float -- ^ __input_min__: The minimum value of the input. -> Tensor v'4 Float -- ^ __input_max__: The maximum value of the input. -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __output_min__, __output_max__) -- -- * __output__ -- -- * __output_min__: This value is copied from input_min. -- -- * __output_max__: This value is copied from input_max. quantizedReshape' op'options tensor shape input_min input_max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor, buildInputs shape, buildInputs input_min, buildInputs input_max] return (opDef "QuantizedReshape" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tshape" .~ tensorType (undefined :: tshape) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" type_attr: "T" } input_arg { name: "shape" description: "Defines the shape of the output tensor." type_attr: "Tshape" } input_arg { name: "input_min" description: "The minimum value of the input." type: DT_FLOAT } input_arg { name: "input_max" description: "The maximum value of the input." type: DT_FLOAT } output_arg { name: "output" type_attr: "T" } output_arg { name: "output_min" description: "This value is copied from input_min." type: DT_FLOAT } output_arg { name: "output_max" description: "This value is copied from input_max." type: DT_FLOAT } attr { name: "T" type: "type" } attr { name: "Tshape" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Closes the given queue. -- -- This operation signals that no more elements will be enqueued in the -- given queue. Subsequent Enqueue(Many) operations will fail. -- Subsequent Dequeue(Many) operations will continue to succeed if -- sufficient elements remain in the queue. Subsequent Dequeue(Many) -- operations that would block will fail immediately. queueClose :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue. -> m' (ControlNode) queueClose = queueClose' id queueClose' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue. -> m' (ControlNode) queueClose' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "QueueClose" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a queue." type: DT_STRING is_ref: true } attr { name: "cancel_pending_enqueues" type: "bool" default_value { b: false } description: "If true, all pending enqueue requests that are\nblocked on the given queue will be cancelled." } -} -- | Closes the given queue. -- -- This operation signals that no more elements will be enqueued in the -- given queue. Subsequent Enqueue(Many) operations will fail. -- Subsequent Dequeue(Many) operations will continue to succeed if -- sufficient elements remain in the queue. Subsequent Dequeue(Many) -- operations that would block will fail immediately. queueCloseV2 :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue. 
-> m' (ControlNode) queueCloseV2 = queueCloseV2' id queueCloseV2' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue. -> m' (ControlNode) queueCloseV2' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "QueueCloseV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a queue." type: DT_RESOURCE } attr { name: "cancel_pending_enqueues" type: "bool" default_value { b: false } description: "If true, all pending enqueue requests that are\nblocked on the given queue will be cancelled." } -} -- | Dequeues a tuple of one or more tensors from the given queue. -- -- This operation has k outputs, where k is the number of components -- in the tuples stored in the given queue, and output i is the ith -- component of the dequeued tuple. -- -- N.B. If the queue is empty, this operation will block until an element -- has been dequeued (or 'timeout_ms' elapses, if specified). queueDequeue :: forall component_types m' . (MonadBuild m', TensorTypes component_types) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue. -> m' (TensorList (Value) component_types) -- ^ __components__: One or more tensors that were dequeued as a tuple. queueDequeue = queueDequeue' id queueDequeue' :: forall component_types m' . (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue. -> m' (TensorList (Value) component_types) -- ^ __components__: One or more tensors that were dequeued as a tuple. queueDequeue' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "QueueDequeue" & opAttr "component_types" .~ fromTensorTypes (Proxy :: Proxy component_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a queue." type: DT_STRING is_ref: true } output_arg { name: "components" description: "One or more tensors that were dequeued as a tuple." type_list_attr: "component_types" } attr { name: "component_types" type: "list(type)" description: "The type of each component in a tuple." has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } description: "If the queue is empty, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet." } -} -- | Dequeues n tuples of one or more tensors from the given queue. -- -- If the queue is closed and there are fewer than n elements, then an -- OutOfRange error is returned. -- -- This operation concatenates queue-element component tensors along the -- 0th dimension to make a single component tensor. All of the components -- in the dequeued tuple will have size n in the 0th dimension. -- -- This operation has k outputs, where k is the number of components in -- the tuples stored in the given queue, and output i is the ith -- component of the dequeued tuple. -- -- N.B. If the queue is empty, this operation will block until n elements -- have been dequeued (or 'timeout_ms' elapses, if specified). queueDequeueMany :: forall v'2 component_types m' . (MonadBuild m', TensorTypes component_types) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue. -> Tensor v'2 Data.Int.Int32 -- ^ __n__: The number of tuples to dequeue. 
-> m' (TensorList (Value) component_types) -- ^ __components__: One or more tensors that were dequeued as a tuple. queueDequeueMany = queueDequeueMany' id queueDequeueMany' :: forall v'2 component_types m' . (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue. -> Tensor v'2 Data.Int.Int32 -- ^ __n__: The number of tuples to dequeue. -> m' (TensorList (Value) component_types) -- ^ __components__: One or more tensors that were dequeued as a tuple. queueDequeueMany' op'options handle n | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs n] buildOp [] (opDef "QueueDequeueMany" & opAttr "component_types" .~ fromTensorTypes (Proxy :: Proxy component_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a queue." type: DT_STRING is_ref: true } input_arg { name: "n" description: "The number of tuples to dequeue." type: DT_INT32 } output_arg { name: "components" description: "One or more tensors that were dequeued as a tuple." type_list_attr: "component_types" } attr { name: "component_types" type: "list(type)" description: "The type of each component in a tuple." has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } description: "If the queue has fewer than n elements, this operation\nwill block for up to timeout_ms milliseconds.\nNote: This option is not supported yet." } -} -- | Dequeues n tuples of one or more tensors from the given queue. -- -- If the queue is closed and there are fewer than n elements, then an -- OutOfRange error is returned. -- -- This operation concatenates queue-element component tensors along the -- 0th dimension to make a single component tensor. All of the components -- in the dequeued tuple will have size n in the 0th dimension. -- -- This operation has k outputs, where k is the number of components in -- the tuples stored in the given queue, and output i is the ith -- component of the dequeued tuple. -- -- N.B. If the queue is empty, this operation will block until n elements -- have been dequeued (or 'timeout_ms' elapses, if specified). queueDequeueManyV2 :: forall v'1 v'2 component_types m' . (MonadBuild m', TensorTypes component_types) => Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue. -> Tensor v'2 Data.Int.Int32 -- ^ __n__: The number of tuples to dequeue. -> m' (TensorList (Value) component_types) -- ^ __components__: One or more tensors that were dequeued as a tuple. queueDequeueManyV2 = queueDequeueManyV2' id queueDequeueManyV2' :: forall v'1 v'2 component_types m' . (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue. -> Tensor v'2 Data.Int.Int32 -- ^ __n__: The number of tuples to dequeue. -> m' (TensorList (Value) component_types) -- ^ __components__: One or more tensors that were dequeued as a tuple. queueDequeueManyV2' op'options handle n | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs n] buildOp [] (opDef "QueueDequeueManyV2" & opAttr "component_types" .~ fromTensorTypes (Proxy :: Proxy component_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a queue." type: DT_RESOURCE } input_arg { name: "n" description: "The number of tuples to dequeue." 
type: DT_INT32 } output_arg { name: "components" description: "One or more tensors that were dequeued as a tuple." type_list_attr: "component_types" } attr { name: "component_types" type: "list(type)" description: "The type of each component in a tuple." has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } description: "If the queue has fewer than n elements, this operation\nwill block for up to timeout_ms milliseconds.\nNote: This option is not supported yet." } -} -- | Dequeues n tuples of one or more tensors from the given queue. -- -- This operation is not supported by all queues. If a queue does not support -- DequeueUpTo, then an Unimplemented error is returned. -- -- If the queue is closed and there are more than 0 but fewer than n elements -- remaining, then instead of returning an OutOfRange error like -- QueueDequeueMany, fewer than `n` elements are returned immediately. If the queue -- is closed and there are 0 elements left in the queue, then an OutOfRange -- error is returned just like in QueueDequeueMany. Otherwise the behavior -- is identical to QueueDequeueMany: -- -- This operation concatenates queue-element component tensors along the -- 0th dimension to make a single component tensor. All of the components -- in the dequeued tuple will have size n in the 0th dimension. -- -- This operation has k outputs, where k is the number of components in -- the tuples stored in the given queue, and output i is the ith -- component of the dequeued tuple. queueDequeueUpTo :: forall v'2 component_types m' . (MonadBuild m', TensorTypes component_types) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue. -> Tensor v'2 Data.Int.Int32 -- ^ __n__: The number of tuples to dequeue. -> m' (TensorList (Value) component_types) -- ^ __components__: One or more tensors that were dequeued as a tuple. queueDequeueUpTo = queueDequeueUpTo' id queueDequeueUpTo' :: forall v'2 component_types m' . (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue. -> Tensor v'2 Data.Int.Int32 -- ^ __n__: The number of tuples to dequeue. -> m' (TensorList (Value) component_types) -- ^ __components__: One or more tensors that were dequeued as a tuple. queueDequeueUpTo' op'options handle n | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs n] buildOp [] (opDef "QueueDequeueUpTo" & opAttr "component_types" .~ fromTensorTypes (Proxy :: Proxy component_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a queue." type: DT_STRING is_ref: true } input_arg { name: "n" description: "The number of tuples to dequeue." type: DT_INT32 } output_arg { name: "components" description: "One or more tensors that were dequeued as a tuple." type_list_attr: "component_types" } attr { name: "component_types" type: "list(type)" description: "The type of each component in a tuple." has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } description: "If the queue has fewer than n elements, this operation\nwill block for up to timeout_ms milliseconds.\nNote: This option is not supported yet." } -} -- | Dequeues n tuples of one or more tensors from the given queue. -- -- This operation is not supported by all queues. If a queue does not support -- DequeueUpTo, then an Unimplemented error is returned.
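--
-- For example (full semantics below), a hypothetical sketch of draining up
-- to 32 elements from a single-component @float@ queue. The session API is
-- assumed to come from "TensorFlow.Core", 'Ops.scalar' from
-- "TensorFlow.Ops", and the 'ListOf' constructors from "TensorFlow.Types";
-- the queue handle itself would come from a queue-creating op such as
-- 'randomShuffleQueueV2':
--
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.Ops as Ops
-- > import TensorFlow.Types (ListOf(..), ResourceHandle)
-- >
-- > -- Ask for up to 32 elements; once the queue is closed, a final short
-- > -- batch is returned instead of an OutOfRange error.
-- > dequeueBatch :: TF.Tensor TF.Value ResourceHandle
-- >              -> TF.Session (TF.Tensor TF.Value Float)
-- > dequeueBatch q = do
-- >     components <- queueDequeueUpToV2 q (Ops.scalar 32)
-- >     case components of
-- >         xs :/ Nil -> return xs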
-- -- If the queue is closed and there are more than 0 but fewer than n elements -- remaining, then instead of returning an OutOfRange error like -- QueueDequeueMany, fewer than `n` elements are returned immediately. If the queue -- is closed and there are 0 elements left in the queue, then an OutOfRange -- error is returned just like in QueueDequeueMany. Otherwise the behavior -- is identical to QueueDequeueMany: -- -- This operation concatenates queue-element component tensors along the -- 0th dimension to make a single component tensor. All of the components -- in the dequeued tuple will have size n in the 0th dimension. -- -- This operation has k outputs, where k is the number of components in -- the tuples stored in the given queue, and output i is the ith -- component of the dequeued tuple. queueDequeueUpToV2 :: forall v'1 v'2 component_types m' . (MonadBuild m', TensorTypes component_types) => Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue. -> Tensor v'2 Data.Int.Int32 -- ^ __n__: The number of tuples to dequeue. -> m' (TensorList (Value) component_types) -- ^ __components__: One or more tensors that were dequeued as a tuple. queueDequeueUpToV2 = queueDequeueUpToV2' id queueDequeueUpToV2' :: forall v'1 v'2 component_types m' . (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue. -> Tensor v'2 Data.Int.Int32 -- ^ __n__: The number of tuples to dequeue. -> m' (TensorList (Value) component_types) -- ^ __components__: One or more tensors that were dequeued as a tuple. queueDequeueUpToV2' op'options handle n | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs n] buildOp [] (opDef "QueueDequeueUpToV2" & opAttr "component_types" .~ fromTensorTypes (Proxy :: Proxy component_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a queue." type: DT_RESOURCE } input_arg { name: "n" description: "The number of tuples to dequeue." type: DT_INT32 } output_arg { name: "components" description: "One or more tensors that were dequeued as a tuple." type_list_attr: "component_types" } attr { name: "component_types" type: "list(type)" description: "The type of each component in a tuple." has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } description: "If the queue has fewer than n elements, this operation\nwill block for up to timeout_ms milliseconds.\nNote: This option is not supported yet." } -} -- | Dequeues a tuple of one or more tensors from the given queue. -- -- This operation has k outputs, where k is the number of components -- in the tuples stored in the given queue, and output i is the ith -- component of the dequeued tuple. -- -- N.B. If the queue is empty, this operation will block until an element -- has been dequeued (or 'timeout_ms' elapses, if specified). queueDequeueV2 :: forall v'1 component_types m' . (MonadBuild m', TensorTypes component_types) => Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue. -> m' (TensorList (Value) component_types) -- ^ __components__: One or more tensors that were dequeued as a tuple. queueDequeueV2 = queueDequeueV2' id queueDequeueV2' :: forall v'1 component_types m' . (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue.
-> m' (TensorList (Value) component_types) -- ^ __components__: One or more tensors that were dequeued as a tuple. queueDequeueV2' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "QueueDequeueV2" & opAttr "component_types" .~ fromTensorTypes (Proxy :: Proxy component_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a queue." type: DT_RESOURCE } output_arg { name: "components" description: "One or more tensors that were dequeued as a tuple." type_list_attr: "component_types" } attr { name: "component_types" type: "list(type)" description: "The type of each component in a tuple." has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } description: "If the queue is empty, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet." } -} -- | Enqueues a tuple of one or more tensors in the given queue. -- -- The components input has k elements, which correspond to the components of -- tuples stored in the given queue. -- -- N.B. If the queue is full, this operation will block until the given -- element has been enqueued (or 'timeout_ms' elapses, if specified). queueEnqueue :: forall v'2 tcomponents m' . (MonadBuild m', TensorTypes tcomponents) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue. -> TensorList (v'2) tcomponents -- ^ __components__: One or more tensors from which the enqueued tensors should be taken. -> m' (ControlNode) queueEnqueue = queueEnqueue' id queueEnqueue' :: forall v'2 tcomponents m' . (MonadBuild m', TensorTypes tcomponents) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue. -> TensorList (v'2) tcomponents -- ^ __components__: One or more tensors from which the enqueued tensors should be taken. -> m' (ControlNode) queueEnqueue' op'options handle components | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs components] buildOp [] (opDef "QueueEnqueue" & opAttr "Tcomponents" .~ fromTensorTypes (Proxy :: Proxy tcomponents) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a queue." type: DT_STRING is_ref: true } input_arg { name: "components" description: "One or more tensors from which the enqueued tensors should be taken." type_list_attr: "Tcomponents" } attr { name: "Tcomponents" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } description: "If the queue is full, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet." } -} -- | Enqueues zero or more tuples of one or more tensors in the given queue. -- -- This operation slices each component tensor along the 0th dimension to -- make multiple queue elements. All of the tuple components must have the -- same size in the 0th dimension. -- -- The components input has k elements, which correspond to the components of -- tuples stored in the given queue. -- -- N.B. If the queue is full, this operation will block until the given -- elements have been enqueued (or 'timeout_ms' elapses, if specified). queueEnqueueMany :: forall v'2 tcomponents m' . (MonadBuild m', TensorTypes tcomponents) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue. 
-> TensorList (v'2) tcomponents -- ^ __components__: One or more tensors from which the enqueued tensors should -- be taken. -> m' (ControlNode) queueEnqueueMany = queueEnqueueMany' id queueEnqueueMany' :: forall v'2 tcomponents m' . (MonadBuild m', TensorTypes tcomponents) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue. -> TensorList (v'2) tcomponents -- ^ __components__: One or more tensors from which the enqueued tensors should -- be taken. -> m' (ControlNode) queueEnqueueMany' op'options handle components | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs components] buildOp [] (opDef "QueueEnqueueMany" & opAttr "Tcomponents" .~ fromTensorTypes (Proxy :: Proxy tcomponents) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a queue." type: DT_STRING is_ref: true } input_arg { name: "components" description: "One or more tensors from which the enqueued tensors should\nbe taken." type_list_attr: "Tcomponents" } attr { name: "Tcomponents" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } description: "If the queue is too full, this operation will block for up\nto timeout_ms milliseconds.\nNote: This option is not supported yet." } -} -- | Enqueues zero or more tuples of one or more tensors in the given queue. -- -- This operation slices each component tensor along the 0th dimension to -- make multiple queue elements. All of the tuple components must have the -- same size in the 0th dimension. -- -- The components input has k elements, which correspond to the components of -- tuples stored in the given queue. -- -- N.B. If the queue is full, this operation will block until the given -- elements have been enqueued (or 'timeout_ms' elapses, if specified). queueEnqueueManyV2 :: forall v'1 v'2 tcomponents m' . (MonadBuild m', TensorTypes tcomponents) => Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue. -> TensorList (v'2) tcomponents -- ^ __components__: One or more tensors from which the enqueued tensors should -- be taken. -> m' (ControlNode) queueEnqueueManyV2 = queueEnqueueManyV2' id queueEnqueueManyV2' :: forall v'1 v'2 tcomponents m' . (MonadBuild m', TensorTypes tcomponents) => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue. -> TensorList (v'2) tcomponents -- ^ __components__: One or more tensors from which the enqueued tensors should -- be taken. -> m' (ControlNode) queueEnqueueManyV2' op'options handle components | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs components] buildOp [] (opDef "QueueEnqueueManyV2" & opAttr "Tcomponents" .~ fromTensorTypes (Proxy :: Proxy tcomponents) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a queue." type: DT_RESOURCE } input_arg { name: "components" description: "One or more tensors from which the enqueued tensors should\nbe taken." type_list_attr: "Tcomponents" } attr { name: "Tcomponents" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } description: "If the queue is too full, this operation will block for up\nto timeout_ms milliseconds.\nNote: This option is not supported yet." } -} -- | Enqueues a tuple of one or more tensors in the given queue. 
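--
-- For example, a hypothetical round trip pairing this op with
-- 'queueDequeueV2'. The session API ('TF.run_', 'TF.Session') is assumed to
-- come from "TensorFlow.Core", 'Ops.scalar' from "TensorFlow.Ops", and the
-- 'ListOf' constructors from "TensorFlow.Types":
--
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.Ops as Ops
-- > import TensorFlow.Types (ListOf(..), ResourceHandle)
-- >
-- > roundTrip :: TF.Tensor TF.Value ResourceHandle
-- >           -> TF.Session (TF.Tensor TF.Value Float)
-- > roundTrip q = do
-- >     enqueue <- queueEnqueueV2 q (Ops.scalar (42 :: Float) :/ Nil)
-- >     TF.run_ enqueue  -- execute the enqueue before dequeuing
-- >     components <- queueDequeueV2 q
-- >     case components of
-- >         x :/ Nil -> return x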
-- -- The components input has k elements, which correspond to the components of -- tuples stored in the given queue. -- -- N.B. If the queue is full, this operation will block until the given -- element has been enqueued (or 'timeout_ms' elapses, if specified). queueEnqueueV2 :: forall v'1 v'2 tcomponents m' . (MonadBuild m', TensorTypes tcomponents) => Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue. -> TensorList (v'2) tcomponents -- ^ __components__: One or more tensors from which the enqueued tensors should be taken. -> m' (ControlNode) queueEnqueueV2 = queueEnqueueV2' id queueEnqueueV2' :: forall v'1 v'2 tcomponents m' . (MonadBuild m', TensorTypes tcomponents) => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue. -> TensorList (v'2) tcomponents -- ^ __components__: One or more tensors from which the enqueued tensors should be taken. -> m' (ControlNode) queueEnqueueV2' op'options handle components | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs components] buildOp [] (opDef "QueueEnqueueV2" & opAttr "Tcomponents" .~ fromTensorTypes (Proxy :: Proxy tcomponents) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a queue." type: DT_RESOURCE } input_arg { name: "components" description: "One or more tensors from which the enqueued tensors should be taken." type_list_attr: "Tcomponents" } attr { name: "Tcomponents" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } description: "If the queue is full, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet." } -} -- | Computes the number of elements in the given queue. queueSize :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue. -> m' (Tensor Value Data.Int.Int32) -- ^ __size__: The number of elements in the given queue. queueSize = queueSize' id queueSize' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a queue. -> m' (Tensor Value Data.Int.Int32) -- ^ __size__: The number of elements in the given queue. queueSize' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "QueueSize" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a queue." type: DT_STRING is_ref: true } output_arg { name: "size" description: "The number of elements in the given queue." type: DT_INT32 } -} -- | Computes the number of elements in the given queue. queueSizeV2 :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue. -> m' (Tensor Value Data.Int.Int32) -- ^ __size__: The number of elements in the given queue. queueSizeV2 = queueSizeV2' id queueSizeV2' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a queue. -> m' (Tensor Value Data.Int.Int32) -- ^ __size__: The number of elements in the given queue. queueSizeV2' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "QueueSizeV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a queue." 
type: DT_RESOURCE } output_arg { name: "size" description: "The number of elements in the given queue." type: DT_INT32 } -} -- | Compute the 1-dimensional discrete Fourier Transform of a real-valued signal -- -- over the inner-most dimension of `input`. -- -- Since the DFT of a real signal is Hermitian-symmetric, `RFFT` only returns the -- `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term, -- followed by the `fft_length / 2` positive-frequency terms. rFFT :: Tensor v'1 Float -- ^ __input__: A float32 tensor. -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__: An int32 tensor of shape [1]. The FFT length. -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same rank as `input`. The inner-most -- dimension of `input` is replaced with the `fft_length / 2 + 1` unique -- frequency components of its 1D Fourier Transform. -- -- @compatibility(numpy) -- Equivalent to np.fft.rfft -- @end_compatibility rFFT = rFFT' id rFFT' :: OpParams -> Tensor v'1 Float -- ^ __input__: A float32 tensor. -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__: An int32 tensor of shape [1]. The FFT length. -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same rank as `input`. The inner-most -- dimension of `input` is replaced with the `fft_length / 2 + 1` unique -- frequency components of its 1D Fourier Transform. -- -- @compatibility(numpy) -- Equivalent to np.fft.rfft -- @end_compatibility rFFT' op'options input fft_length | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs fft_length] return (opDef "RFFT" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "A float32 tensor." type: DT_FLOAT } input_arg { name: "fft_length" description: "An int32 tensor of shape [1]. The FFT length." type: DT_INT32 } output_arg { name: "output" description: "A complex64 tensor of the same rank as `input`. The inner-most\n dimension of `input` is replaced with the `fft_length / 2 + 1` unique\n frequency components of its 1D Fourier Transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.rfft\n@end_compatibility" type: DT_COMPLEX64 } -} -- | Compute the 2-dimensional discrete Fourier Transform of a real-valued signal -- -- over the inner-most 2 dimensions of `input`. -- -- Since the DFT of a real signal is Hermitian-symmetric, `RFFT2D` only returns the -- `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension -- of `output`: the zero-frequency term, followed by the `fft_length / 2` -- positive-frequency terms. rFFT2D :: Tensor v'1 Float -- ^ __input__: A float32 tensor. -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__: An int32 tensor of shape [2]. The FFT length for each dimension. -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same rank as `input`. The inner-most 2 -- dimensions of `input` are replaced with their 2D Fourier Transform. The -- inner-most dimension contains `fft_length / 2 + 1` unique frequency -- components. -- -- @compatibility(numpy) -- Equivalent to np.fft.rfft2 -- @end_compatibility rFFT2D = rFFT2D' id rFFT2D' :: OpParams -> Tensor v'1 Float -- ^ __input__: A float32 tensor. -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__: An int32 tensor of shape [2]. The FFT length for each dimension. -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same rank as `input`. 
The inner-most 2 -- dimensions of `input` are replaced with their 2D Fourier Transform. The -- inner-most dimension contains `fft_length / 2 + 1` unique frequency -- components. -- -- @compatibility(numpy) -- Equivalent to np.fft.rfft2 -- @end_compatibility rFFT2D' op'options input fft_length | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs fft_length] return (opDef "RFFT2D" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "A float32 tensor." type: DT_FLOAT } input_arg { name: "fft_length" description: "An int32 tensor of shape [2]. The FFT length for each dimension." type: DT_INT32 } output_arg { name: "output" description: "A complex64 tensor of the same rank as `input`. The inner-most 2\n dimensions of `input` are replaced with their 2D Fourier Transform. The\n inner-most dimension contains `fft_length / 2 + 1` unique frequency\n components.\n\n@compatibility(numpy)\nEquivalent to np.fft.rfft2\n@end_compatibility" type: DT_COMPLEX64 } -} -- | Compute the 3-dimensional discrete Fourier Transform of a real-valued signal -- -- over the inner-most 3 dimensions of `input`. -- -- Since the DFT of a real signal is Hermitian-symmetric, `RFFT3D` only returns the -- `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension -- of `output`: the zero-frequency term, followed by the `fft_length / 2` -- positive-frequency terms. rFFT3D :: Tensor v'1 Float -- ^ __input__: A float32 tensor. -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__: An int32 tensor of shape [3]. The FFT length for each dimension. -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same rank as `input`. The inner-most 3 -- dimensions of `input` are replaced with their 3D Fourier Transform. The -- inner-most dimension contains `fft_length / 2 + 1` unique frequency -- components. -- -- @compatibility(numpy) -- Equivalent to np.fft.rfftn with 3 dimensions. -- @end_compatibility rFFT3D = rFFT3D' id rFFT3D' :: OpParams -> Tensor v'1 Float -- ^ __input__: A float32 tensor. -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__: An int32 tensor of shape [3]. The FFT length for each dimension. -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__: A complex64 tensor of the same rank as `input`. The inner-most 3 -- dimensions of `input` are replaced with their 3D Fourier Transform. The -- inner-most dimension contains `fft_length / 2 + 1` unique frequency -- components. -- -- @compatibility(numpy) -- Equivalent to np.fft.rfftn with 3 dimensions. -- @end_compatibility rFFT3D' op'options input fft_length | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs fft_length] return (opDef "RFFT3D" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "A float32 tensor." type: DT_FLOAT } input_arg { name: "fft_length" description: "An int32 tensor of shape [3]. The FFT length for each dimension." type: DT_INT32 } output_arg { name: "output" description: "A complex64 tensor of the same rank as `input`. The inner-most 3\n dimensions of `input` are replaced with their 3D Fourier Transform. The\n inner-most dimension contains `fft_length / 2 + 1` unique frequency\n components.\n\n@compatibility(numpy)\nEquivalent to np.fft.rfftn with 3 dimensions.\n@end_compatibility" type: DT_COMPLEX64 } -} -- | Converts one or more images from RGB to HSV.
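--
-- For example, a hypothetical sketch converting a single red pixel. The
-- session API is assumed to come from "TensorFlow.Core" and the 'Ops.vector'
-- constant helper from "TensorFlow.Ops":
--
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.Ops as Ops
-- >
-- > -- A 1-D tensor of size 3 is the smallest valid input: one RGB pixel.
-- > hsvOfRed :: IO (V.Vector Float)
-- > hsvOfRed = TF.runSession $
-- >     TF.run (rGBToHSV (Ops.vector [1, 0, 0 :: Float]))
--
-- Pure red should come back as hue 0, saturation 1, value 1.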
-- -- Outputs a tensor of the same shape as the `images` tensor, containing the HSV -- value of the pixels. The output is only well defined if the values in `images` -- are in `[0,1]`. -- -- `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and -- `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0 -- corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue. rGBToHSV :: forall v'1 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __images__: 1-D or higher rank. RGB data to convert. Last dimension must be size 3. -> Tensor Build t -- ^ __output__: `images` converted to HSV. rGBToHSV = rGBToHSV' id rGBToHSV' :: forall v'1 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __images__: 1-D or higher rank. RGB data to convert. Last dimension must be size 3. -> Tensor Build t -- ^ __output__: `images` converted to HSV. rGBToHSV' op'options images | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images] return (opDef "RGBToHSV" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "images" description: "1-D or higher rank. RGB data to convert. Last dimension must be size 3." type_attr: "T" } output_arg { name: "output" description: "`images` converted to HSV." type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Randomly crop `image`. -- -- `size` is a 1-D int64 tensor with 2 elements representing the crop height and -- width. The values must be non-negative. -- -- This Op picks a random location in `image` and crops a `height` by `width` -- rectangle from that location. The random location is picked so the cropped -- area will fit inside the original image. randomCrop :: forall v'1 v'2 t m' . (MonadBuild m', OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __image__: 3-D of shape `[height, width, channels]`. -> Tensor v'2 Data.Int.Int64 -- ^ __size__: 1-D of length 2 containing: `crop_height`, `crop_width`. -> m' (Tensor Value t) -- ^ __output__: 3-D of shape `[crop_height, crop_width, channels]`. randomCrop = randomCrop' id randomCrop' :: forall v'1 v'2 t m' . (MonadBuild m', OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __image__: 3-D of shape `[height, width, channels]`. -> Tensor v'2 Data.Int.Int64 -- ^ __size__: 1-D of length 2 containing: `crop_height`, `crop_width`. -> m' (Tensor Value t) -- ^ __output__: 3-D of shape `[crop_height, crop_width, channels]`. randomCrop' op'options image size | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs image, buildInputs size] buildOp [] (opDef "RandomCrop" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "image" description: "3-D of shape `[height, width, channels]`." type_attr: "T" } input_arg { name: "size" description: "1-D of length 2 containing: `crop_height`, `crop_width`."
type: DT_INT64 } output_arg { name: "output" description: "3-D of shape `[crop_height, crop_width, channels]`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } -} -- | Outputs random values from the Gamma distribution(s) described by alpha. -- -- This op uses the algorithm by Marsaglia et al. to acquire samples via -- transformation-rejection from pairs of uniform and normal random variables. -- See http://dl.acm.org/citation.cfm?id=358414 randomGamma :: forall v'1 v'2 s t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] s, OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 s -- ^ __shape__: 1-D integer tensor. Shape of independent samples to draw from each -- distribution described by the shape parameters given in alpha. -> Tensor v'2 t -- ^ __alpha__: A tensor in which each scalar is a "shape" parameter describing the -- associated gamma distribution. -> m' (Tensor Value t) -- ^ __output__: A tensor with shape `shape + shape(alpha)`. Each slice -- `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for -- `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha. randomGamma = randomGamma' id randomGamma' :: forall v'1 v'2 s t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] s, OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 s -- ^ __shape__: 1-D integer tensor. Shape of independent samples to draw from each -- distribution described by the shape parameters given in alpha. -> Tensor v'2 t -- ^ __alpha__: A tensor in which each scalar is a "shape" parameter describing the -- associated gamma distribution. -> m' (Tensor Value t) -- ^ __output__: A tensor with shape `shape + shape(alpha)`. Each slice -- `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for -- `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha. randomGamma' op'options shape alpha | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape, buildInputs alpha] buildOp [] (opDef "RandomGamma" & opAttr "S" .~ tensorType (undefined :: s) & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "shape" description: "1-D integer tensor. Shape of independent samples to draw from each\ndistribution described by the shape parameters given in alpha." type_attr: "S" } input_arg { name: "alpha" description: "A tensor in which each scalar is a \"shape\" parameter describing the\nassociated gamma distribution." type_attr: "T" } output_arg { name: "output" description: "A tensor with shape `shape + shape(alpha)`. Each slice\n`[:, ..., :, i0, i1, ...iN]` contains the samples drawn for\n`alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha." type_attr: "T" } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed."
} attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } attr { name: "S" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Outputs random values from the Poisson distribution(s) described by rate. -- -- This op uses two algorithms, depending on rate. If rate >= 10, then -- the algorithm by Hormann is used to acquire samples via -- transformation-rejection. -- See http://www.sciencedirect.com/science/article/pii/0167668793909974. -- -- Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform -- random variables. -- See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer -- Programming, Volume 2. Addison Wesley randomPoisson :: forall v'1 v'2 s dtype m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] s, OneOf '[Data.Word.Word16, Double, Float] dtype) => Tensor v'1 s -- ^ __shape__: 1-D integer tensor. Shape of independent samples to draw from each -- distribution described by the shape parameters given in rate. -> Tensor v'2 dtype -- ^ __rate__: A tensor in which each scalar is a "rate" parameter describing the -- associated poisson distribution. -> m' (Tensor Value dtype) -- ^ __output__: A tensor with shape `shape + shape(rate)`. Each slice -- `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for -- `rate[i0, i1, ...iN]`. The dtype of the output matches the dtype of -- rate. randomPoisson = randomPoisson' id randomPoisson' :: forall v'1 v'2 s dtype m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] s, OneOf '[Data.Word.Word16, Double, Float] dtype) => OpParams -> Tensor v'1 s -- ^ __shape__: 1-D integer tensor. Shape of independent samples to draw from each -- distribution described by the shape parameters given in rate. -> Tensor v'2 dtype -- ^ __rate__: A tensor in which each scalar is a "rate" parameter describing the -- associated poisson distribution. -> m' (Tensor Value dtype) -- ^ __output__: A tensor with shape `shape + shape(rate)`. Each slice -- `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for -- `rate[i0, i1, ...iN]`. The dtype of the output matches the dtype of -- rate. randomPoisson' op'options shape rate | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape, buildInputs rate] buildOp [] (opDef "RandomPoisson" & opAttr "S" .~ tensorType (undefined :: s) & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "shape" description: "1-D integer tensor. Shape of independent samples to draw from each\ndistribution described by the shape parameters given in rate." type_attr: "S" } input_arg { name: "rate" description: "A tensor in which each scalar is a \"rate\" parameter describing the\nassociated poisson distribution." type_attr: "dtype" } output_arg { name: "output" description: "A tensor with shape `shape + shape(rate)`. Each slice\n`[:, ..., :, i0, i1, ...iN]` contains the samples drawn for\n`rate[i0, i1, ...iN]`. The dtype of the output matches the dtype of\nrate." type_attr: "dtype" } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." 
} attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } attr { name: "S" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "dtype" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Randomly shuffles a tensor along its first dimension. -- -- The tensor is shuffled along dimension 0, such that each `value[j]` is mapped -- to one and only one `output[i]`. For example, a mapping that might occur for a -- 3x2 tensor is: -- -- ```prettyprint -- [[1, 2], [[5, 6], -- [3, 4], ==> [1, 2], -- [5, 6]] [3, 4]] -- ``` randomShuffle :: forall v'1 t m' . (MonadBuild m', TensorType t) => Tensor v'1 t -- ^ __value__: The tensor to be shuffled. -> m' (Tensor Value t) -- ^ __output__: A tensor of same shape and type as `value`, shuffled along its first -- dimension. randomShuffle = randomShuffle' id randomShuffle' :: forall v'1 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 t -- ^ __value__: The tensor to be shuffled. -> m' (Tensor Value t) -- ^ __output__: A tensor of same shape and type as `value`, shuffled along its first -- dimension. randomShuffle' op'options value | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value] buildOp [] (opDef "RandomShuffle" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "value" description: "The tensor to be shuffled." type_attr: "T" } output_arg { name: "output" description: "A tensor of same shape and type as `value`, shuffled along its first\ndimension." type_attr: "T" } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } attr { name: "T" type: "type" } -} -- | A queue that randomizes the order of elements. randomShuffleQueue :: forall m' . (MonadBuild m') => [DataType] -- ^ __component_types__: The type of each component in a value. -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the queue. randomShuffleQueue = randomShuffleQueue' id randomShuffleQueue' :: forall m' . (MonadBuild m') => OpParams -> [DataType] -- ^ __component_types__: The type of each component in a value. -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the queue. randomShuffleQueue' op'options component_types | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "RandomShuffleQueue" & opAttr "component_types" .~ component_types & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" description: "The handle to the queue." type: DT_STRING is_ref: true } attr { name: "component_types" type: "list(type)" description: "The type of each component in a value." has_minimum: true minimum: 1 } attr { name: "shapes" type: "list(shape)" default_value { list { } } description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time." 
has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit." } attr { name: "min_after_dequeue" type: "int" default_value { i: 0 } description: "Dequeue will block unless there would be this\nmany elements after the dequeue or the queue is closed. This\nensures a minimum level of mixing of elements." } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either seed or seed2 is set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, a random seed is used." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions." } -} -- | A queue that randomizes the order of elements. randomShuffleQueueV2 :: forall m' . (MonadBuild m') => [DataType] -- ^ __component_types__: The type of each component in a value. -> m' (Tensor Value ResourceHandle) -- ^ __handle__: The handle to the queue. randomShuffleQueueV2 = randomShuffleQueueV2' id randomShuffleQueueV2' :: forall m' . (MonadBuild m') => OpParams -> [DataType] -- ^ __component_types__: The type of each component in a value. -> m' (Tensor Value ResourceHandle) -- ^ __handle__: The handle to the queue. randomShuffleQueueV2' op'options component_types | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "RandomShuffleQueueV2" & opAttr "component_types" .~ component_types & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" description: "The handle to the queue." type: DT_RESOURCE } attr { name: "component_types" type: "list(type)" description: "The type of each component in a value." has_minimum: true minimum: 1 } attr { name: "shapes" type: "list(shape)" default_value { list { } } description: "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time." has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } description: "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit." } attr { name: "min_after_dequeue" type: "int" default_value { i: 0 } description: "Dequeue will block unless there would be this\nmany elements after the dequeue or the queue is closed. This\nensures a minimum level of mixing of elements." } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either seed or seed2 is set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, a random seed is used." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used." 
} attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this queue will be shared under the given name\nacross multiple sessions." } -} -- | Outputs random values from a normal distribution. -- -- The generated values will have mean 0 and standard deviation 1. randomStandardNormal :: forall v'1 dtype t m' . (MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Tensor v'1 t -- ^ __shape__: The shape of the output tensor. -> m' (Tensor Value dtype) -- ^ __output__: A tensor of the specified shape filled with random normal values. randomStandardNormal = randomStandardNormal' id randomStandardNormal' :: forall v'1 dtype t m' . (MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Tensor v'1 t -- ^ __shape__: The shape of the output tensor. -> m' (Tensor Value dtype) -- ^ __output__: A tensor of the specified shape filled with random normal values. randomStandardNormal' op'options shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape] buildOp [] (opDef "RandomStandardNormal" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "shape" description: "The shape of the output tensor." type_attr: "T" } output_arg { name: "output" description: "A tensor of the specified shape filled with random normal values." type_attr: "dtype" } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } attr { name: "dtype" type: "type" description: "The type of the output." allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Outputs random values from a uniform distribution. -- -- The generated values follow a uniform distribution in the range `[0, 1)`. The -- lower bound 0 is included in the range, while the upper bound 1 is excluded. randomUniform :: forall v'1 dtype t m' . (MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Tensor v'1 t -- ^ __shape__: The shape of the output tensor. -> m' (Tensor Value dtype) -- ^ __output__: A tensor of the specified shape filled with uniform random values. randomUniform = randomUniform' id randomUniform' :: forall v'1 dtype t m' . (MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Tensor v'1 t -- ^ __shape__: The shape of the output tensor. -> m' (Tensor Value dtype) -- ^ __output__: A tensor of the specified shape filled with uniform random values. randomUniform' op'options shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape] buildOp [] (opDef "RandomUniform" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "shape" description: "The shape of the output tensor." 
type_attr: "T" } output_arg { name: "output" description: "A tensor of the specified shape filled with uniform random values." type_attr: "dtype" } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } attr { name: "dtype" type: "type" description: "The type of the output." allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Outputs random integers from a uniform distribution. -- -- The generated values are uniform integers in the range `[minval, maxval)`. -- The lower bound `minval` is included in the range, while the upper bound -- `maxval` is excluded. -- -- The random integers are slightly biased unless `maxval - minval` is an exact -- power of two. The bias is small for values of `maxval - minval` significantly -- smaller than the range of the output (either `2^32` or `2^64`). randomUniformInt :: forall v'1 v'2 v'3 tout t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] tout, OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Tensor v'1 t -- ^ __shape__: The shape of the output tensor. -> Tensor v'2 tout -- ^ __minval__: 0-D. Inclusive lower bound on the generated integers. -> Tensor v'3 tout -- ^ __maxval__: 0-D. Exclusive upper bound on the generated integers. -> m' (Tensor Value tout) -- ^ __output__: A tensor of the specified shape filled with uniform random integers. randomUniformInt = randomUniformInt' id randomUniformInt' :: forall v'1 v'2 v'3 tout t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] tout, OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Tensor v'1 t -- ^ __shape__: The shape of the output tensor. -> Tensor v'2 tout -- ^ __minval__: 0-D. Inclusive lower bound on the generated integers. -> Tensor v'3 tout -- ^ __maxval__: 0-D. Exclusive upper bound on the generated integers. -> m' (Tensor Value tout) -- ^ __output__: A tensor of the specified shape filled with uniform random integers. randomUniformInt' op'options shape minval maxval | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape, buildInputs minval, buildInputs maxval] buildOp [] (opDef "RandomUniformInt" & opAttr "Tout" .~ tensorType (undefined :: tout) & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "shape" description: "The shape of the output tensor." type_attr: "T" } input_arg { name: "minval" description: "0-D. Inclusive lower bound on the generated integers." type_attr: "Tout" } input_arg { name: "maxval" description: "0-D. Exclusive upper bound on the generated integers." type_attr: "Tout" } output_arg { name: "output" description: "A tensor of the specified shape filled with uniform random integers." type_attr: "Tout" } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." 
} attr { name: "Tout" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Creates a sequence of numbers. -- -- This operation creates a sequence of numbers that begins at `start` and -- extends by increments of `delta` up to but not including `limit`. -- -- For example: -- -- ``` -- # 'start' is 3 -- # 'limit' is 18 -- # 'delta' is 3 -- tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] -- ``` range :: forall v'1 v'2 v'3 tidx . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] tidx) => Tensor v'1 tidx -- ^ __start__: 0-D (scalar). First entry in the sequence. -> Tensor v'2 tidx -- ^ __limit__: 0-D (scalar). Upper limit of sequence, exclusive. -> Tensor v'3 tidx -- ^ __delta__: 0-D (scalar). Optional. Default is 1. Number that increments `start`. -> Tensor Build tidx -- ^ __output__: 1-D. range = range' id range' :: forall v'1 v'2 v'3 tidx . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] tidx) => OpParams -> Tensor v'1 tidx -- ^ __start__: 0-D (scalar). First entry in the sequence. -> Tensor v'2 tidx -- ^ __limit__: 0-D (scalar). Upper limit of sequence, exclusive. -> Tensor v'3 tidx -- ^ __delta__: 0-D (scalar). Optional. Default is 1. Number that increments `start`. -> Tensor Build tidx -- ^ __output__: 1-D. range' op'options start limit delta | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs start, buildInputs limit, buildInputs delta] return (opDef "Range" & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "start" description: "0-D (scalar). First entry in the sequence." type_attr: "Tidx" } input_arg { name: "limit" description: "0-D (scalar). Upper limit of sequence, exclusive." type_attr: "Tidx" } input_arg { name: "delta" description: "0-D (scalar). Optional. Default is 1. Number that increments `start`." type_attr: "Tidx" } output_arg { name: "output" description: "1-D." type_attr: "Tidx" } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } -} -- | Returns the rank of a tensor. -- -- This operation returns an integer representing the rank of `input`. -- -- For example: -- -- ```prettyprint -- # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] -- # shape of tensor 't' is [2, 2, 3] -- rank(t) ==> 3 -- ``` -- -- **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank -- of a tensor is the number of indices required to uniquely select each element -- of the tensor. Rank is also known as "order", "degree", or "ndims." rank :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __input__ -> Tensor Build Data.Int.Int32 -- ^ __output__ rank = rank' id rank' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build Data.Int.Int32 -- ^ __output__ rank' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Rank" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type: DT_INT32 } attr { name: "T" type: "type" } -} -- | Reads and outputs the entire contents of the input filename. 
readFile :: Tensor v'1 Data.ByteString.ByteString -- ^ __filename__ -> Tensor Build Data.ByteString.ByteString -- ^ __contents__ readFile = readFile' id readFile' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __filename__ -> Tensor Build Data.ByteString.ByteString -- ^ __contents__ readFile' op'options filename | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs filename] return (opDef "ReadFile" & op'options & opInputs .~ op'inputs) {- input_arg { name: "filename" type: DT_STRING } output_arg { name: "contents" type: DT_STRING } -} -- | Reads the value of a variable. -- -- The tensor returned by this operation is immutable. -- -- The value returned by this operation is guaranteed to be influenced by all the -- writes on which this operation depends directly or indirectly, and to not be -- influenced by any of the writes which depend directly or indirectly on this -- operation. readVariableOp :: forall v'1 dtype m' . (MonadBuild m', TensorType dtype) => Tensor v'1 ResourceHandle -- ^ __resource__: handle to the resource in which to store the variable. -> m' (Tensor Value dtype) -- ^ __value__ readVariableOp = readVariableOp' id readVariableOp' :: forall v'1 dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__: handle to the resource in which to store the variable. -> m' (Tensor Value dtype) -- ^ __value__ readVariableOp' op'options resource | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource] buildOp [] (opDef "ReadVariableOp" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" description: "handle to the resource in which to store the variable." type: DT_RESOURCE } output_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" description: "the dtype of the value." } -} -- | Returns the number of records this Reader has produced. -- -- This is the same as the number of ReaderRead executions that have -- succeeded. readerNumRecordsProduced :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader. -> m' (Tensor Value Data.Int.Int64) -- ^ __records_produced__ readerNumRecordsProduced = readerNumRecordsProduced' id readerNumRecordsProduced' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader. -> m' (Tensor Value Data.Int.Int64) -- ^ __records_produced__ readerNumRecordsProduced' op'options reader_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle] buildOp [] (opDef "ReaderNumRecordsProduced" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" description: "Handle to a Reader." type: DT_STRING is_ref: true } output_arg { name: "records_produced" type: DT_INT64 } -} -- | Returns the number of records this Reader has produced. -- -- This is the same as the number of ReaderRead executions that have -- succeeded. readerNumRecordsProducedV2 :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a Reader. -> m' (Tensor Value Data.Int.Int64) -- ^ __records_produced__ readerNumRecordsProducedV2 = readerNumRecordsProducedV2' id readerNumRecordsProducedV2' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a Reader. 
-> m' (Tensor Value Data.Int.Int64) -- ^ __records_produced__ readerNumRecordsProducedV2' op'options reader_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle] buildOp [] (opDef "ReaderNumRecordsProducedV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" description: "Handle to a Reader." type: DT_RESOURCE } output_arg { name: "records_produced" type: DT_INT64 } -} -- | Returns the number of work units this Reader has finished processing. readerNumWorkUnitsCompleted :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader. -> m' (Tensor Value Data.Int.Int64) -- ^ __units_completed__ readerNumWorkUnitsCompleted = readerNumWorkUnitsCompleted' id readerNumWorkUnitsCompleted' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader. -> m' (Tensor Value Data.Int.Int64) -- ^ __units_completed__ readerNumWorkUnitsCompleted' op'options reader_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle] buildOp [] (opDef "ReaderNumWorkUnitsCompleted" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" description: "Handle to a Reader." type: DT_STRING is_ref: true } output_arg { name: "units_completed" type: DT_INT64 } -} -- | Returns the number of work units this Reader has finished processing. readerNumWorkUnitsCompletedV2 :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a Reader. -> m' (Tensor Value Data.Int.Int64) -- ^ __units_completed__ readerNumWorkUnitsCompletedV2 = readerNumWorkUnitsCompletedV2' id readerNumWorkUnitsCompletedV2' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a Reader. -> m' (Tensor Value Data.Int.Int64) -- ^ __units_completed__ readerNumWorkUnitsCompletedV2' op'options reader_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle] buildOp [] (opDef "ReaderNumWorkUnitsCompletedV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" description: "Handle to a Reader." type: DT_RESOURCE } output_arg { name: "units_completed" type: DT_INT64 } -} -- | Returns the next record (key, value pair) produced by a Reader. -- -- Will dequeue from the input queue if necessary (e.g. when the -- Reader needs to start reading from a new file since it has finished -- with the previous file). readerRead :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader. -> Tensor Ref Data.ByteString.ByteString -- ^ __queue_handle__: Handle to a Queue, with string work items. -> m' ((Tensor Value Data.ByteString.ByteString, Tensor Value Data.ByteString.ByteString)) -- ^ (__key__, __value__) -- -- * __key__: A scalar. -- -- * __value__: A scalar. readerRead = readerRead' id readerRead' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader. -> Tensor Ref Data.ByteString.ByteString -- ^ __queue_handle__: Handle to a Queue, with string work items. -> m' ((Tensor Value Data.ByteString.ByteString, Tensor Value Data.ByteString.ByteString)) -- ^ (__key__, __value__) -- -- * __key__: A scalar. -- -- * __value__: A scalar. 
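--
-- A usage sketch (hypothetical; assumes `readerHandle` and `queueHandle` were
-- created elsewhere, e.g. by a Reader constructor op and a string input queue):
--
-- ```
-- (key, value) <- readerRead readerHandle queueHandle
-- ```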
readerRead' op'options reader_handle queue_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle, buildInputs queue_handle] buildOp [] (opDef "ReaderRead" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" description: "Handle to a Reader." type: DT_STRING is_ref: true } input_arg { name: "queue_handle" description: "Handle to a Queue, with string work items." type: DT_STRING is_ref: true } output_arg { name: "key" description: "A scalar." type: DT_STRING } output_arg { name: "value" description: "A scalar." type: DT_STRING } -} -- | Returns up to `num_records` (key, value) pairs produced by a Reader. -- -- Will dequeue from the input queue if necessary (e.g. when the -- Reader needs to start reading from a new file since it has finished -- with the previous file). -- It may return less than `num_records` even before the last batch. readerReadUpTo :: forall v'3 m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a `Reader`. -> Tensor Ref Data.ByteString.ByteString -- ^ __queue_handle__: Handle to a `Queue`, with string work items. -> Tensor v'3 Data.Int.Int64 -- ^ __num_records__: number of records to read from `Reader`. -> m' ((Tensor Value Data.ByteString.ByteString, Tensor Value Data.ByteString.ByteString)) -- ^ (__keys__, __values__) -- -- * __keys__: A 1-D tensor. -- -- * __values__: A 1-D tensor. readerReadUpTo = readerReadUpTo' id readerReadUpTo' :: forall v'3 m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a `Reader`. -> Tensor Ref Data.ByteString.ByteString -- ^ __queue_handle__: Handle to a `Queue`, with string work items. -> Tensor v'3 Data.Int.Int64 -- ^ __num_records__: number of records to read from `Reader`. -> m' ((Tensor Value Data.ByteString.ByteString, Tensor Value Data.ByteString.ByteString)) -- ^ (__keys__, __values__) -- -- * __keys__: A 1-D tensor. -- -- * __values__: A 1-D tensor. readerReadUpTo' op'options reader_handle queue_handle num_records | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle, buildInputs queue_handle, buildInputs num_records] buildOp [] (opDef "ReaderReadUpTo" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" description: "Handle to a `Reader`." type: DT_STRING is_ref: true } input_arg { name: "queue_handle" description: "Handle to a `Queue`, with string work items." type: DT_STRING is_ref: true } input_arg { name: "num_records" description: "number of records to read from `Reader`." type: DT_INT64 } output_arg { name: "keys" description: "A 1-D tensor." type: DT_STRING } output_arg { name: "values" description: "A 1-D tensor." type: DT_STRING } -} -- | Returns up to `num_records` (key, value) pairs produced by a Reader. -- -- Will dequeue from the input queue if necessary (e.g. when the -- Reader needs to start reading from a new file since it has finished -- with the previous file). -- It may return less than `num_records` even before the last batch. readerReadUpToV2 :: forall v'1 v'2 v'3 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a `Reader`. -> Tensor v'2 ResourceHandle -- ^ __queue_handle__: Handle to a `Queue`, with string work items. -> Tensor v'3 Data.Int.Int64 -- ^ __num_records__: number of records to read from `Reader`. 
-> m' ((Tensor Value Data.ByteString.ByteString, Tensor Value Data.ByteString.ByteString)) -- ^ (__keys__, __values__) -- -- * __keys__: A 1-D tensor. -- -- * __values__: A 1-D tensor. readerReadUpToV2 = readerReadUpToV2' id readerReadUpToV2' :: forall v'1 v'2 v'3 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a `Reader`. -> Tensor v'2 ResourceHandle -- ^ __queue_handle__: Handle to a `Queue`, with string work items. -> Tensor v'3 Data.Int.Int64 -- ^ __num_records__: number of records to read from `Reader`. -> m' ((Tensor Value Data.ByteString.ByteString, Tensor Value Data.ByteString.ByteString)) -- ^ (__keys__, __values__) -- -- * __keys__: A 1-D tensor. -- -- * __values__: A 1-D tensor. readerReadUpToV2' op'options reader_handle queue_handle num_records | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle, buildInputs queue_handle, buildInputs num_records] buildOp [] (opDef "ReaderReadUpToV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" description: "Handle to a `Reader`." type: DT_RESOURCE } input_arg { name: "queue_handle" description: "Handle to a `Queue`, with string work items." type: DT_RESOURCE } input_arg { name: "num_records" description: "number of records to read from `Reader`." type: DT_INT64 } output_arg { name: "keys" description: "A 1-D tensor." type: DT_STRING } output_arg { name: "values" description: "A 1-D tensor." type: DT_STRING } -} -- | Returns the next record (key, value pair) produced by a Reader. -- -- Will dequeue from the input queue if necessary (e.g. when the -- Reader needs to start reading from a new file since it has finished -- with the previous file). readerReadV2 :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a Reader. -> Tensor v'2 ResourceHandle -- ^ __queue_handle__: Handle to a Queue, with string work items. -> m' ((Tensor Value Data.ByteString.ByteString, Tensor Value Data.ByteString.ByteString)) -- ^ (__key__, __value__) -- -- * __key__: A scalar. -- -- * __value__: A scalar. readerReadV2 = readerReadV2' id readerReadV2' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a Reader. -> Tensor v'2 ResourceHandle -- ^ __queue_handle__: Handle to a Queue, with string work items. -> m' ((Tensor Value Data.ByteString.ByteString, Tensor Value Data.ByteString.ByteString)) -- ^ (__key__, __value__) -- -- * __key__: A scalar. -- -- * __value__: A scalar. readerReadV2' op'options reader_handle queue_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle, buildInputs queue_handle] buildOp [] (opDef "ReaderReadV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" description: "Handle to a Reader." type: DT_RESOURCE } input_arg { name: "queue_handle" description: "Handle to a Queue, with string work items." type: DT_RESOURCE } output_arg { name: "key" description: "A scalar." type: DT_STRING } output_arg { name: "value" description: "A scalar." type: DT_STRING } -} -- | Restore a Reader to its initial clean state. readerReset :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader. -> m' (ControlNode) readerReset = readerReset' id readerReset' :: forall m' . 
(MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader. -> m' (ControlNode) readerReset' op'options reader_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle] buildOp [] (opDef "ReaderReset" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" description: "Handle to a Reader." type: DT_STRING is_ref: true } -} -- | Restore a Reader to its initial clean state. readerResetV2 :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a Reader. -> m' (ControlNode) readerResetV2 = readerResetV2' id readerResetV2' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a Reader. -> m' (ControlNode) readerResetV2' op'options reader_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle] buildOp [] (opDef "ReaderResetV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" description: "Handle to a Reader." type: DT_RESOURCE } -} -- | Restore a reader to a previously saved state. -- -- Not all Readers support being restored, so this can produce an -- Unimplemented error. readerRestoreState :: forall v'2 m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader. -> Tensor v'2 Data.ByteString.ByteString -- ^ __state__: Result of a ReaderSerializeState of a Reader with type -- matching reader_handle. -> m' (ControlNode) readerRestoreState = readerRestoreState' id readerRestoreState' :: forall v'2 m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader. -> Tensor v'2 Data.ByteString.ByteString -- ^ __state__: Result of a ReaderSerializeState of a Reader with type -- matching reader_handle. -> m' (ControlNode) readerRestoreState' op'options reader_handle state | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle, buildInputs state] buildOp [] (opDef "ReaderRestoreState" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" description: "Handle to a Reader." type: DT_STRING is_ref: true } input_arg { name: "state" description: "Result of a ReaderSerializeState of a Reader with type\nmatching reader_handle." type: DT_STRING } -} -- | Restore a reader to a previously saved state. -- -- Not all Readers support being restored, so this can produce an -- Unimplemented error. readerRestoreStateV2 :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a Reader. -> Tensor v'2 Data.ByteString.ByteString -- ^ __state__: Result of a ReaderSerializeState of a Reader with type -- matching reader_handle. -> m' (ControlNode) readerRestoreStateV2 = readerRestoreStateV2' id readerRestoreStateV2' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a Reader. -> Tensor v'2 Data.ByteString.ByteString -- ^ __state__: Result of a ReaderSerializeState of a Reader with type -- matching reader_handle. 
-> m' (ControlNode) readerRestoreStateV2' op'options reader_handle state | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle, buildInputs state] buildOp [] (opDef "ReaderRestoreStateV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" description: "Handle to a Reader." type: DT_RESOURCE } input_arg { name: "state" description: "Result of a ReaderSerializeState of a Reader with type\nmatching reader_handle." type: DT_STRING } -} -- | Produce a string tensor that encodes the state of a Reader. -- -- Not all Readers support being serialized, so this can produce an -- Unimplemented error. readerSerializeState :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader. -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __state__ readerSerializeState = readerSerializeState' id readerSerializeState' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__: Handle to a Reader. -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __state__ readerSerializeState' op'options reader_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle] buildOp [] (opDef "ReaderSerializeState" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" description: "Handle to a Reader." type: DT_STRING is_ref: true } output_arg { name: "state" type: DT_STRING } -} -- | Produce a string tensor that encodes the state of a Reader. -- -- Not all Readers support being serialized, so this can produce an -- Unimplemented error. readerSerializeStateV2 :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a Reader. -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __state__ readerSerializeStateV2 = readerSerializeStateV2' id readerSerializeStateV2' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __reader_handle__: Handle to a Reader. -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __state__ readerSerializeStateV2' op'options reader_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle] buildOp [] (opDef "ReaderSerializeStateV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" description: "Handle to a Reader." type: DT_RESOURCE } output_arg { name: "state" type: DT_STRING } -} -- | Returns the real part of a complex number. -- -- Given a tensor `input` of complex numbers, this operation returns a tensor of -- type `float` that is the real part of each element in `input`. All elements in -- `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real -- part returned by this operation and *b* is the imaginary part. -- -- For example: -- -- ``` -- # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] -- tf.real(input) ==> [-2.25, 3.25] -- ``` real :: forall v'1 t tout . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] t, OneOf '[Double, Float] tout) => Tensor v'1 t -- ^ __input__ -> Tensor Build tout -- ^ __output__ real = real' id real' :: forall v'1 t tout . 
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] t, OneOf '[Double, Float] tout) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build tout -- ^ __output__ real' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Real" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tout" .~ tensorType (undefined :: tout) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "Tout" } attr { name: "T" type: "type" default_value { type: DT_COMPLEX64 } allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } } } attr { name: "Tout" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Returns x / y element-wise for real types. -- -- If `x` and `y` are reals, this will return the floating-point division. -- -- *NOTE*: `RealDiv` supports broadcasting. More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) realDiv :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ realDiv = realDiv' id realDiv' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ realDiv' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "RealDiv" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_UINT16 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Computes the reciprocal of x element-wise. -- -- I.e., \\(y = 1 / x\\). reciprocal :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ reciprocal = reciprocal' id reciprocal' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ reciprocal' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Reciprocal" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Computes the gradient for the inverse of `x` wrt its input.
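--
-- Since \\(d(1/x)/dx = -1/x^2 = -y^2\\) for \\(y = 1/x\\), the chain rule gives
-- the formula below.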
-- -- Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy` -- is the corresponding input gradient. reciprocalGrad :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ reciprocalGrad = reciprocalGrad' id reciprocalGrad' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ reciprocalGrad' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "ReciprocalGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Emits randomized records. recordInput :: forall m' . (MonadBuild m') => m' (Tensor Value Data.ByteString.ByteString) -- ^ __records__: A tensor of shape [batch_size]. recordInput = recordInput' id recordInput' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __records__: A tensor of shape [batch_size]. recordInput' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "RecordInput" & op'options & opInputs .~ op'inputs) {- output_arg { name: "records" description: "A tensor of shape [batch_size]." type: DT_STRING } attr { name: "file_pattern" type: "string" description: "Glob pattern for the data files." } attr { name: "file_random_seed" type: "int" default_value { i: 301 } description: "Random seeds used to produce randomized records." } attr { name: "file_shuffle_shift_ratio" type: "float" default_value { f: 0.0 } description: "Shifts the list of files after the list is randomly\nshuffled." } attr { name: "file_buffer_size" type: "int" default_value { i: 10000 } description: "The randomization shuffling buffer." } attr { name: "file_parallelism" type: "int" default_value { i: 16 } description: "How many sstables are opened and concurrently iterated over." } attr { name: "batch_size" type: "int" default_value { i: 32 } description: "The batch size." } -} -- | Joins a string Tensor across the given dimensions. -- -- Computes the string join across dimensions in the given string Tensor of shape -- `[d_0, d_1, ..., d_n-1]`. Returns a new Tensor created by joining the input -- strings with the given separator (default: empty string). Negative indices are -- counted backwards from the end, with `-1` being equivalent to `n - 1`. 
-- -- For example: -- -- ``` -- # tensor `a` is [["a", "b"], ["c", "d"]] -- tf.reduce_join(a, 0) ==> ["ac", "bd"] -- tf.reduce_join(a, 1) ==> ["ab", "cd"] -- tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"] -- tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"] -- tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]] -- tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]] -- tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"] -- tf.reduce_join(a, [0, 1]) ==> ["acbd"] -- tf.reduce_join(a, [1, 0]) ==> ["abcd"] -- tf.reduce_join(a, []) ==> ["abcd"] -- ``` reduceJoin :: Tensor v'1 Data.ByteString.ByteString -- ^ __inputs__: The input to be joined. All reduced indices must have non-zero size. -> Tensor v'2 Data.Int.Int32 -- ^ __reduction_indices__: The dimensions to reduce over. Dimensions are reduced in the -- order specified. Omitting `reduction_indices` is equivalent to passing -- `[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported. -> Tensor Build Data.ByteString.ByteString -- ^ __output__: Has shape equal to that of the input with reduced dimensions removed or -- set to `1` depending on `keep_dims`. reduceJoin = reduceJoin' id reduceJoin' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __inputs__: The input to be joined. All reduced indices must have non-zero size. -> Tensor v'2 Data.Int.Int32 -- ^ __reduction_indices__: The dimensions to reduce over. Dimensions are reduced in the -- order specified. Omitting `reduction_indices` is equivalent to passing -- `[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported. -> Tensor Build Data.ByteString.ByteString -- ^ __output__: Has shape equal to that of the input with reduced dimensions removed or -- set to `1` depending on `keep_dims`. reduceJoin' op'options inputs reduction_indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs, buildInputs reduction_indices] return (opDef "ReduceJoin" & op'options & opInputs .~ op'inputs) {- input_arg { name: "inputs" description: "The input to be joined. All reduced indices must have non-zero size." type: DT_STRING } input_arg { name: "reduction_indices" description: "The dimensions to reduce over. Dimensions are reduced in the\norder specified. Omitting `reduction_indices` is equivalent to passing\n`[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported." type: DT_INT32 } output_arg { name: "output" description: "Has shape equal to that of the input with reduced dimensions removed or\nset to `1` depending on `keep_dims`." type: DT_STRING } attr { name: "keep_dims" type: "bool" default_value { b: false } description: "If `True`, retain reduced dimensions with length `1`." } attr { name: "separator" type: "string" default_value { s: "" } description: "The separator to use when joining." } -} -- | Creates or finds a child frame, and makes `data` available to the child frame. -- -- The unique `frame_name` is used by the `Executor` to identify frames. If -- `is_constant` is true, `output` is a constant in the child frame; otherwise -- it may be changed in the child frame. At most `parallel_iterations` iterations -- are run in parallel in the child frame. refEnter :: forall t m' . (MonadBuild m', TensorType t) => Tensor Ref t -- ^ __data__: The tensor to be made available to the child frame. -> m' (Tensor Ref t) -- ^ __output__: The same tensor as `data`. refEnter = refEnter' id refEnter' :: forall t m' . 
(MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -- ^ __data__: The tensor to be made available to the child frame. -> m' (Tensor Ref t) -- ^ __output__: The same tensor as `data`. refEnter' op'options data' | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data'] buildOp [] (opDef "RefEnter" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" description: "The tensor to be made available to the child frame." type_attr: "T" is_ref: true } output_arg { name: "output" description: "The same tensor as `data`." type_attr: "T" is_ref: true } attr { name: "T" type: "type" } attr { name: "frame_name" type: "string" description: "The name of the child frame." } attr { name: "is_constant" type: "bool" default_value { b: false } description: "If true, the output is constant within the child frame." } attr { name: "parallel_iterations" type: "int" default_value { i: 10 } description: "The number of iterations allowed to run in parallel." } -} -- | Exits the current frame to its parent frame. -- -- Exit makes its input `data` available to the parent frame. refExit :: forall t m' . (MonadBuild m', TensorType t) => Tensor Ref t -- ^ __data__: The tensor to be made available to the parent frame. -> m' (Tensor Ref t) -- ^ __output__: The same tensor as `data`. refExit = refExit' id refExit' :: forall t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -- ^ __data__: The tensor to be made available to the parent frame. -> m' (Tensor Ref t) -- ^ __output__: The same tensor as `data`. refExit' op'options data' | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data'] buildOp [] (opDef "RefExit" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" description: "The tensor to be made available to the parent frame." type_attr: "T" is_ref: true } output_arg { name: "output" description: "The same tensor as `data`." type_attr: "T" is_ref: true } attr { name: "T" type: "type" } -} -- | Return the same ref tensor as the input ref tensor. refIdentity :: forall t m' . (MonadBuild m', TensorType t) => Tensor Ref t -- ^ __input__ -> m' (Tensor Ref t) -- ^ __output__ refIdentity = refIdentity' id refIdentity' :: forall t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -- ^ __input__ -> m' (Tensor Ref t) -- ^ __output__ refIdentity' op'options input | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "RefIdentity" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" is_ref: true } output_arg { name: "output" type_attr: "T" is_ref: true } attr { name: "T" type: "type" } -} -- | Forwards the value of an available tensor from `inputs` to `output`. -- -- `Merge` waits for at least one of the tensors in `inputs` to become available. -- It is usually combined with `Switch` to implement branching. -- -- `Merge` forwards the first tensor to become available to `output`, and sets -- `value_index` to its index in `inputs`. refMerge :: forall t m' . (MonadBuild m', TensorType t) => [Tensor Ref t] -- ^ __inputs__: The input tensors, exactly one of which will become available. -> m' ((Tensor Ref t, Tensor Value Data.Int.Int32)) -- ^ (__output__, __value_index__) -- -- * __output__: Will be set to the available input tensor.
-- -- * __value_index__: The index of the chosen input tensor in `inputs`. refMerge = refMerge' id refMerge' :: forall t m' . (MonadBuild m', TensorType t) => OpParams -> [Tensor Ref t] -- ^ __inputs__: The input tensors, exactly one of which will become available. -> m' ((Tensor Ref t, Tensor Value Data.Int.Int32)) -- ^ (__output__, __value_index__) -- -- * __output__: Will be set to the available input tensor. -- -- * __value_index__: The index of the chosen input tensor in `inputs`. refMerge' op'options inputs | eqLengthGuard [("N", [("inputs", length inputs)])] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs] buildOp [] (opDef "RefMerge" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length inputs) :: Int64 {- input_arg { name: "inputs" description: "The input tensors, exactly one of which will become available." type_attr: "T" number_attr: "N" is_ref: true } output_arg { name: "output" description: "Will be set to the available input tensor." type_attr: "T" is_ref: true } output_arg { name: "value_index" description: "The index of the chosen input tensor in `inputs`." type: DT_INT32 } attr { name: "T" type: "type" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } -} -- | Makes its input available to the next iteration. refNextIteration :: forall t m' . (MonadBuild m', TensorType t) => Tensor Ref t -- ^ __data__: The tensor to be made available to the next iteration. -> m' (Tensor Ref t) -- ^ __output__: The same tensor as `data`. refNextIteration = refNextIteration' id refNextIteration' :: forall t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -- ^ __data__: The tensor to be made available to the next iteration. -> m' (Tensor Ref t) -- ^ __output__: The same tensor as `data`. refNextIteration' op'options data' | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data'] buildOp [] (opDef "RefNextIteration" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" description: "The tensor to be made available to the next iteration." type_attr: "T" is_ref: true } output_arg { name: "output" description: "The same tensor as `data`." type_attr: "T" is_ref: true } attr { name: "T" type: "type" } -} -- | Forwards the `index`th element of `inputs` to `output`. refSelect :: forall v'1 t m' . (MonadBuild m', TensorType t) => Tensor v'1 Data.Int.Int32 -- ^ __index__: A scalar that determines the input that gets selected. -> [Tensor Ref t] -- ^ __inputs__: A list of ref tensors, one of which will be forwarded to `output`. -> m' (Tensor Ref t) -- ^ __output__: The forwarded tensor. refSelect = refSelect' id refSelect' :: forall v'1 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __index__: A scalar that determines the input that gets selected. -> [Tensor Ref t] -- ^ __inputs__: A list of ref tensors, one of which will be forwarded to `output`. -> m' (Tensor Ref t) -- ^ __output__: The forwarded tensor. 
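--
-- A control-flow sketch (hypothetical; assumes ref tensors `v0` and `v1` and a
-- scalar int32 `index` built elsewhere in the graph):
--
-- ```
-- chosen <- refSelect index [v0, v1]
-- ```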
refSelect' op'options index inputs | eqLengthGuard [("N", [("inputs", length inputs)])] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs index, buildInputs inputs] buildOp [] (opDef "RefSelect" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length inputs) :: Int64 {- input_arg { name: "index" description: "A scalar that determines the input that gets selected." type: DT_INT32 } input_arg { name: "inputs" description: "A list of ref tensors, one of which will be forwarded to `output`." type_attr: "T" number_attr: "N" is_ref: true } output_arg { name: "output" description: "The forwarded tensor." type_attr: "T" is_ref: true } attr { name: "T" type: "type" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } -} -- | Forwards the ref tensor `data` to the output port determined by `pred`. -- -- If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, -- the data goes to `output_false`. -- -- See also `Switch` and `Merge`. refSwitch :: forall v'2 t m' . (MonadBuild m', TensorType t) => Tensor Ref t -- ^ __data__: The ref tensor to be forwarded to the appropriate output. -> Tensor v'2 Bool -- ^ __pred__: A scalar that specifies which output port will receive data. -> m' ((Tensor Ref t, Tensor Ref t)) -- ^ (__output_false__, __output_true__) -- -- * __output_false__: If `pred` is false, data will be forwarded to this output. -- -- * __output_true__: If `pred` is true, data will be forwarded to this output. refSwitch = refSwitch' id refSwitch' :: forall v'2 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -- ^ __data__: The ref tensor to be forwarded to the appropriate output. -> Tensor v'2 Bool -- ^ __pred__: A scalar that specifies which output port will receive data. -> m' ((Tensor Ref t, Tensor Ref t)) -- ^ (__output_false__, __output_true__) -- -- * __output_false__: If `pred` is false, data will be forwarded to this output. -- -- * __output_true__: If `pred` is true, data will be forwarded to this output. refSwitch' op'options data' pred | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs pred] buildOp [] (opDef "RefSwitch" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" description: "The ref tensor to be forwarded to the appropriate output." type_attr: "T" is_ref: true } input_arg { name: "pred" description: "A scalar that specifies which output port will receive data." type: DT_BOOL } output_arg { name: "output_false" description: "If `pred` is false, data will be forwarded to this output." type_attr: "T" is_ref: true } output_arg { name: "output_true" description: "If `pred` is true, data will be forwarded to this output." type_attr: "T" is_ref: true } attr { name: "T" type: "type" } -} -- | Computes rectified linear: `max(features, 0)`. relu :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __features__ -> Tensor Build t -- ^ __activations__ relu = relu' id relu' :: forall v'1 t . 
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __features__ -> Tensor Build t -- ^ __activations__ relu' op'options features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features] return (opDef "Relu" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "features" type_attr: "T" } output_arg { name: "activations" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } -} -- | Computes rectified linear 6: `min(max(features, 0), 6)`. relu6 :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __features__ -> Tensor Build t -- ^ __activations__ relu6 = relu6' id relu6' :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __features__ -> Tensor Build t -- ^ __activations__ relu6' op'options features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features] return (opDef "Relu6" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "features" type_attr: "T" } output_arg { name: "activations" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } -} -- | Computes rectified linear 6 gradients for a Relu6 operation. relu6Grad :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __gradients__: The backpropagated gradients to the corresponding Relu6 operation. -> Tensor v'2 t -- ^ __features__: The features passed as input to the corresponding Relu6 operation. -> Tensor Build t -- ^ __backprops__: The gradients: -- `gradients * (features > 0) * (features < 6)`. relu6Grad = relu6Grad' id relu6Grad' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __gradients__: The backpropagated gradients to the corresponding Relu6 operation. -> Tensor v'2 t -- ^ __features__: The features passed as input to the corresponding Relu6 operation. -> Tensor Build t -- ^ __backprops__: The gradients: -- `gradients * (features > 0) * (features < 6)`. relu6Grad' op'options gradients features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients, buildInputs features] return (opDef "Relu6Grad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "gradients" description: "The backpropagated gradients to the corresponding Relu6 operation." type_attr: "T" } input_arg { name: "features" description: "The features passed as input to the corresponding Relu6 operation." type_attr: "T" } output_arg { name: "backprops" description: "The gradients:\n`gradients * (features > 0) * (features < 6)`." 
type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } -} -- | Computes rectified linear gradients for a Relu operation. reluGrad :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __gradients__: The backpropagated gradients to the corresponding Relu operation. -> Tensor v'2 t -- ^ __features__: The features passed as input to the corresponding Relu operation, OR -- the outputs of that operation (both work equivalently). -> Tensor Build t -- ^ __backprops__: `gradients * (features > 0)`. reluGrad = reluGrad' id reluGrad' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __gradients__: The backpropagated gradients to the corresponding Relu operation. -> Tensor v'2 t -- ^ __features__: The features passed as input to the corresponding Relu operation, OR -- the outputs of that operation (both work equivalently). -> Tensor Build t -- ^ __backprops__: `gradients * (features > 0)`. reluGrad' op'options gradients features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients, buildInputs features] return (opDef "ReluGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "gradients" description: "The backpropagated gradients to the corresponding Relu operation." type_attr: "T" } input_arg { name: "features" description: "The features passed as input to the corresponding Relu operation, OR\nthe outputs of that operation (both work equivalently)." type_attr: "T" } output_arg { name: "backprops" description: "`gradients * (features > 0)`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } -} -- | Execute a sub graph on a remote processor transferred by GraphTransferer. -- -- The graph specifications are serialized by protobuf as graph_transfer_info. -- The implementation / limitations may differ for each platform -- and each available peripheral. remoteFusedGraphExecute :: forall v'1 t u . (TensorType t, TensorType u) => Data.Int.Int64 -- ^ __N__ -> [Tensor v'1 t] -- ^ __values__ -> [Tensor Build u] -- ^ __output__ remoteFusedGraphExecute = remoteFusedGraphExecute' id remoteFusedGraphExecute' :: forall v'1 t u . 
(TensorType t, TensorType u) => OpParams -> Data.Int.Int64 -- ^ __N__ -> [Tensor v'1 t] -- ^ __values__ -> [Tensor Build u] -- ^ __output__ remoteFusedGraphExecute' op'options n values | eqLengthGuard [("M", [("values", length values)])] = pureOp [n] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs values] return (opDef "RemoteFusedGraphExecute" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "U" .~ tensorType (undefined :: u) & opAttr "N" .~ n & opAttr "M" .~ m & op'options & opInputs .~ op'inputs) where m = fromIntegral (length values) :: Int64 {- input_arg { name: "values" type_attr: "T" number_attr: "M" } output_arg { name: "output" type_attr: "U" number_attr: "N" } attr { name: "M" type: "int" has_minimum: true } attr { name: "N" type: "int" has_minimum: true } attr { name: "T" type: "type" } attr { name: "U" type: "type" } attr { name: "serialized_graph_transfer_info" type: "string" } -} -- | Given a quantized tensor described by (input, input_min, input_max), outputs a -- -- range that covers the actual values present in that tensor. This op is -- typically used to produce the requested_output_min and requested_output_max for -- Requantize. requantizationRange :: forall v'1 v'2 v'3 tinput . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput) => Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 Float -- ^ __input_min__: The float value that the minimum quantized input value represents. -> Tensor v'3 Float -- ^ __input_max__: The float value that the maximum quantized input value represents. -> (Tensor Build Float, Tensor Build Float) -- ^ (__output_min__, __output_max__) -- -- * __output_min__: The computed min output. -- -- * __output_max__: the computed max output. requantizationRange = requantizationRange' id requantizationRange' :: forall v'1 v'2 v'3 tinput . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput) => OpParams -> Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 Float -- ^ __input_min__: The float value that the minimum quantized input value represents. -> Tensor v'3 Float -- ^ __input_max__: The float value that the maximum quantized input value represents. -> (Tensor Build Float, Tensor Build Float) -- ^ (__output_min__, __output_max__) -- -- * __output_min__: The computed min output. -- -- * __output_max__: the computed max output. requantizationRange' op'options input input_min input_max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs input_min, buildInputs input_max] return (opDef "RequantizationRange" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "input_min" description: "The float value that the minimum quantized input value represents." type: DT_FLOAT } input_arg { name: "input_max" description: "The float value that the maximum quantized input value represents." type: DT_FLOAT } output_arg { name: "output_min" description: "The computed min output." type: DT_FLOAT } output_arg { name: "output_max" description: "the computed max output." type: DT_FLOAT } attr { name: "Tinput" type: "type" description: "The type of the input." 
allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } -} -- | Convert the quantized 'input' tensor into a lower-precision 'output', using the -- -- output range specified with 'requested_output_min' and 'requested_output_max'. -- -- [input_min, input_max] are scalar floats that specify the range for the float -- interpretation of the 'input' data. For example, if input_min is -1.0f and -- input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 -- value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. requantize :: forall v'1 v'2 v'3 v'4 v'5 tinput out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 Float -- ^ __input_min__: The float value that the minimum quantized input value represents. -> Tensor v'3 Float -- ^ __input_max__: The float value that the maximum quantized input value represents. -> Tensor v'4 Float -- ^ __requested_output_min__: The float value that the minimum quantized output value represents. -> Tensor v'5 Float -- ^ __requested_output_max__: The float value that the maximum quantized output value represents. -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __output_min__, __output_max__) -- -- * __output__ -- -- * __output_min__: The requested_output_min value is copied into this output. -- -- * __output_max__: The requested_output_max value is copied into this output. requantize = requantize' id requantize' :: forall v'1 v'2 v'3 v'4 v'5 tinput out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 Float -- ^ __input_min__: The float value that the minimum quantized input value represents. -> Tensor v'3 Float -- ^ __input_max__: The float value that the maximum quantized input value represents. -> Tensor v'4 Float -- ^ __requested_output_min__: The float value that the minimum quantized output value represents. -> Tensor v'5 Float -- ^ __requested_output_max__: The float value that the maximum quantized output value represents. -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __output_min__, __output_max__) -- -- * __output__ -- -- * __output_min__: The requested_output_min value is copied into this output. -- -- * __output_max__: The requested_output_max value is copied into this output. requantize' op'options input input_min input_max requested_output_min requested_output_max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs input_min, buildInputs input_max, buildInputs requested_output_min, buildInputs requested_output_max] return (opDef "Requantize" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "input_min" description: "The float value that the minimum quantized input value represents." type: DT_FLOAT } input_arg { name: "input_max" description: "The float value that the maximum quantized input value represents." 
type: DT_FLOAT } input_arg { name: "requested_output_min" description: "The float value that the minimum quantized output value represents." type: DT_FLOAT } input_arg { name: "requested_output_max" description: "The float value that the maximum quantized output value represents." type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "output_min" description: "The requested_output_min value is copied into this output." type: DT_FLOAT } output_arg { name: "output_max" description: "The requested_output_max value is copied into this output." type: DT_FLOAT } attr { name: "Tinput" type: "type" description: "The type of the input." allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "out_type" type: "type" description: "The type of the output. Should be a lower bit depth than Tinput." allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } -} -- | Reshapes a tensor. -- -- Given `tensor`, this operation returns a tensor that has the same values -- as `tensor` with shape `shape`. -- -- If one component of `shape` is the special value -1, the size of that dimension -- is computed so that the total size remains constant. In particular, a `shape` -- of `[-1]` flattens into 1-D. At most one component of `shape` can be -1. -- -- If `shape` is 1-D or higher, then the operation returns a tensor with shape -- `shape` filled with the values of `tensor`. In this case, the number of elements -- implied by `shape` must be the same as the number of elements in `tensor`. -- -- For example: -- -- ```prettyprint -- # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9] -- # tensor 't' has shape [9] -- reshape(t, [3, 3]) ==> [[1, 2, 3], -- [4, 5, 6], -- [7, 8, 9]] -- -- # tensor 't' is [[[1, 1], [2, 2]], -- # [[3, 3], [4, 4]]] -- # tensor 't' has shape [2, 2, 2] -- reshape(t, [2, 4]) ==> [[1, 1, 2, 2], -- [3, 3, 4, 4]] -- -- # tensor 't' is [[[1, 1, 1], -- # [2, 2, 2]], -- # [[3, 3, 3], -- # [4, 4, 4]], -- # [[5, 5, 5], -- # [6, 6, 6]]] -- # tensor 't' has shape [3, 2, 3] -- # pass '[-1]' to flatten 't' -- reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6] -- -- # -1 can also be used to infer the shape -- -- # -1 is inferred to be 9: -- reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], -- [4, 4, 4, 5, 5, 5, 6, 6, 6]] -- # -1 is inferred to be 2: -- reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], -- [4, 4, 4, 5, 5, 5, 6, 6, 6]] -- # -1 is inferred to be 3: -- reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1], -- [2, 2, 2], -- [3, 3, 3]], -- [[4, 4, 4], -- [5, 5, 5], -- [6, 6, 6]]] -- -- # tensor 't' is [7] -- # shape `[]` reshapes to a scalar -- reshape(t, []) ==> 7 -- ``` reshape :: forall v'1 v'2 t tshape . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tshape) => Tensor v'1 t -- ^ __tensor__ -> Tensor v'2 tshape -- ^ __shape__: Defines the shape of the output tensor. -> Tensor Build t -- ^ __output__ reshape = reshape' id reshape' :: forall v'1 v'2 t tshape . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tshape) => OpParams -> Tensor v'1 t -- ^ __tensor__ -> Tensor v'2 tshape -- ^ __shape__: Defines the shape of the output tensor. 
-> Tensor Build t -- ^ __output__ reshape' op'options tensor shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor, buildInputs shape] return (opDef "Reshape" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tshape" .~ tensorType (undefined :: tshape) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" type_attr: "T" } input_arg { name: "shape" description: "Defines the shape of the output tensor." type_attr: "Tshape" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tshape" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Resize `images` to `size` using area interpolation. -- -- Input images can be of different types but output images are always float. resizeArea :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`. -> Tensor v'2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The -- new size for the images. -> Tensor Build Float -- ^ __resized_images__: 4-D with shape -- `[batch, new_height, new_width, channels]`. resizeArea = resizeArea' id resizeArea' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`. -> Tensor v'2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The -- new size for the images. -> Tensor Build Float -- ^ __resized_images__: 4-D with shape -- `[batch, new_height, new_width, channels]`. resizeArea' op'options images size | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images, buildInputs size] return (opDef "ResizeArea" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "images" description: "4-D with shape `[batch, height, width, channels]`." type_attr: "T" } input_arg { name: "size" description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The\nnew size for the images." type: DT_INT32 } output_arg { name: "resized_images" description: "4-D with shape\n`[batch, new_height, new_width, channels]`." type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "align_corners" type: "bool" default_value { b: false } description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension." } -} -- | Resize `images` to `size` using bicubic interpolation. -- -- Input images can be of different types but output images are always float. resizeBicubic :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`. -> Tensor v'2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The -- new size for the images. 
-> Tensor Build Float -- ^ __resized_images__: 4-D with shape -- `[batch, new_height, new_width, channels]`. resizeBicubic = resizeBicubic' id resizeBicubic' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`. -> Tensor v'2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The -- new size for the images. -> Tensor Build Float -- ^ __resized_images__: 4-D with shape -- `[batch, new_height, new_width, channels]`. resizeBicubic' op'options images size | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images, buildInputs size] return (opDef "ResizeBicubic" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "images" description: "4-D with shape `[batch, height, width, channels]`." type_attr: "T" } input_arg { name: "size" description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The\nnew size for the images." type: DT_INT32 } output_arg { name: "resized_images" description: "4-D with shape\n`[batch, new_height, new_width, channels]`." type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "align_corners" type: "bool" default_value { b: false } description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension." } -} -- | Resize `images` to `size` using bilinear interpolation. -- -- Input images can be of different types but output images are always float. resizeBilinear :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`. -> Tensor v'2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The -- new size for the images. -> Tensor Build Float -- ^ __resized_images__: 4-D with shape -- `[batch, new_height, new_width, channels]`. resizeBilinear = resizeBilinear' id resizeBilinear' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`. -> Tensor v'2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The -- new size for the images. -> Tensor Build Float -- ^ __resized_images__: 4-D with shape -- `[batch, new_height, new_width, channels]`. resizeBilinear' op'options images size | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images, buildInputs size] return (opDef "ResizeBilinear" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "images" description: "4-D with shape `[batch, height, width, channels]`." type_attr: "T" } input_arg { name: "size" description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The\nnew size for the images." 
type: DT_INT32 } output_arg { name: "resized_images" description: "4-D with shape\n`[batch, new_height, new_width, channels]`." type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "align_corners" type: "bool" default_value { b: false } description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension." } -} -- | Computes the gradient of bilinear interpolation. resizeBilinearGrad :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 Float -- ^ __grads__: 4-D with shape `[batch, height, width, channels]`. -> Tensor v'2 t -- ^ __original_image__: 4-D with shape `[batch, orig_height, orig_width, channels]`, -- The image tensor that was resized. -> Tensor Build t -- ^ __output__: 4-D with shape `[batch, orig_height, orig_width, channels]`. -- Gradients with respect to the input image. Input image must have been -- float or double. resizeBilinearGrad = resizeBilinearGrad' id resizeBilinearGrad' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 Float -- ^ __grads__: 4-D with shape `[batch, height, width, channels]`. -> Tensor v'2 t -- ^ __original_image__: 4-D with shape `[batch, orig_height, orig_width, channels]`, -- The image tensor that was resized. -> Tensor Build t -- ^ __output__: 4-D with shape `[batch, orig_height, orig_width, channels]`. -- Gradients with respect to the input image. Input image must have been -- float or double. resizeBilinearGrad' op'options grads original_image | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs grads, buildInputs original_image] return (opDef "ResizeBilinearGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "grads" description: "4-D with shape `[batch, height, width, channels]`." type: DT_FLOAT } input_arg { name: "original_image" description: "4-D with shape `[batch, orig_height, orig_width, channels]`,\nThe image tensor that was resized." type_attr: "T" } output_arg { name: "output" description: "4-D with shape `[batch, orig_height, orig_width, channels]`.\nGradients with respect to the input image. Input image must have been\nfloat or double." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE } } } attr { name: "align_corners" type: "bool" default_value { b: false } description: "If true, rescale grads by (orig_height - 1) / (height - 1), which\nexactly aligns the 4 corners of grads and original_image. If false, rescale by\norig_height / height. Treat similarly the width dimension." } -} -- | Resize `images` to `size` using nearest neighbor interpolation. resizeNearestNeighbor :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`. -> Tensor v'2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The -- new size for the images. -> Tensor Build t -- ^ __resized_images__: 4-D with shape -- `[batch, new_height, new_width, channels]`. 
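-- A pure sketch of the `align_corners` rescaling rule quoted in the op
-- definitions above (illustrative helper name, height dimension only; the
-- width dimension is treated the same way):
--
-- ```haskell
-- -- Factor by which the input is rescaled to produce `newH` output rows.
-- rescaleFactor :: Bool -> Int -> Int -> Double
-- rescaleFactor alignCorners oldH newH
--   | alignCorners && oldH > 1 = fromIntegral (newH - 1) / fromIntegral (oldH - 1)
--   | otherwise                = fromIntegral newH / fromIntegral oldH
-- ```
--
-- For example, resizing 4 rows to 8 gives a factor of 2.0 with
-- `align_corners = false` and 7/3 with `align_corners = true`.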
resizeNearestNeighbor = resizeNearestNeighbor' id resizeNearestNeighbor' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __images__: 4-D with shape `[batch, height, width, channels]`. -> Tensor v'2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The -- new size for the images. -> Tensor Build t -- ^ __resized_images__: 4-D with shape -- `[batch, new_height, new_width, channels]`. resizeNearestNeighbor' op'options images size | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images, buildInputs size] return (opDef "ResizeNearestNeighbor" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "images" description: "4-D with shape `[batch, height, width, channels]`." type_attr: "T" } input_arg { name: "size" description: "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The\nnew size for the images." type: DT_INT32 } output_arg { name: "resized_images" description: "4-D with shape\n`[batch, new_height, new_width, channels]`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "align_corners" type: "bool" default_value { b: false } description: "If true, rescale input by (new_height - 1) / (height - 1), which\nexactly aligns the 4 corners of images and resized images. If false, rescale\nby new_height / height. Treat similarly the width dimension." } -} -- | Computes the gradient of nearest neighbor interpolation. resizeNearestNeighborGrad :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __grads__: 4-D with shape `[batch, height, width, channels]`. -> Tensor v'2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The -- original input size. -> Tensor Build t -- ^ __output__: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients -- with respect to the input image. resizeNearestNeighborGrad = resizeNearestNeighborGrad' id resizeNearestNeighborGrad' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __grads__: 4-D with shape `[batch, height, width, channels]`. -> Tensor v'2 Data.Int.Int32 -- ^ __size__: = A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The -- original input size. -> Tensor Build t -- ^ __output__: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients -- with respect to the input image. resizeNearestNeighborGrad' op'options grads size | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs grads, buildInputs size] return (opDef "ResizeNearestNeighborGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "grads" description: "4-D with shape `[batch, height, width, channels]`." type_attr: "T" } input_arg { name: "size" description: "= A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The\noriginal input size." type: DT_INT32 } output_arg { name: "output" description: "4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients\nwith respect to the input image." 
type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_INT32 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "align_corners" type: "bool" default_value { b: false } description: "If true, rescale grads by (orig_height - 1) / (height - 1), which\nexactly aligns the 4 corners of grads and original_image. If false, rescale by\norig_height / height. Treat similarly the width dimension." } -} -- | Update '*var' according to the adadelta scheme. -- -- accum = rho() * accum + (1 - rho()) * grad.square(); -- update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; -- update_accum = rho() * update_accum + (1 - rho()) * update.square(); -- var -= update; resourceApplyAdadelta :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 ResourceHandle -- ^ __accum_update__: Should be from a Variable(). -> Tensor v'4 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'5 t -- ^ __rho__: Decay factor. Must be a scalar. -> Tensor v'6 t -- ^ __epsilon__: Constant factor. Must be a scalar. -> Tensor v'7 t -- ^ __grad__: The gradient. -> m' (ControlNode) resourceApplyAdadelta = resourceApplyAdadelta' id resourceApplyAdadelta' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 ResourceHandle -- ^ __accum_update__: Should be from a Variable(). -> Tensor v'4 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'5 t -- ^ __rho__: Decay factor. Must be a scalar. -> Tensor v'6 t -- ^ __epsilon__: Constant factor. Must be a scalar. -> Tensor v'7 t -- ^ __grad__: The gradient. -> m' (ControlNode) resourceApplyAdadelta' op'options var accum accum_update lr rho epsilon grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs accum_update, buildInputs lr, buildInputs rho, buildInputs epsilon, buildInputs grad] buildOp [] (opDef "ResourceApplyAdadelta" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "accum" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "accum_update" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "rho" description: "Decay factor. Must be a scalar." type_attr: "T" } input_arg { name: "epsilon" description: "Constant factor. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." 
type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, updating of the var, accum and update_accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention." } -} -- | Update '*var' according to the adagrad scheme. -- -- accum += grad * grad -- var -= lr * grad * (1 / sqrt(accum)) resourceApplyAdagrad :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'4 t -- ^ __grad__: The gradient. -> m' (ControlNode) resourceApplyAdagrad = resourceApplyAdagrad' id resourceApplyAdagrad' :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'4 t -- ^ __grad__: The gradient. -> m' (ControlNode) resourceApplyAdagrad' op'options var accum lr grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs lr, buildInputs grad] buildOp [] (opDef "ResourceApplyAdagrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "accum" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } -} -- | Update '*var' according to the proximal adagrad scheme. resourceApplyAdagradDA :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __gradient_accumulator__: Should be from a Variable(). 
-> Tensor v'3 ResourceHandle -- ^ __gradient_squared_accumulator__: Should be from a Variable(). -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'6 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'7 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'8 Data.Int.Int64 -- ^ __global_step__: Training step number. Must be a scalar. -> m' (ControlNode) resourceApplyAdagradDA = resourceApplyAdagradDA' id resourceApplyAdagradDA' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __gradient_accumulator__: Should be from a Variable(). -> Tensor v'3 ResourceHandle -- ^ __gradient_squared_accumulator__: Should be from a Variable(). -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'6 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'7 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'8 Data.Int.Int64 -- ^ __global_step__: Training step number. Must be a scalar. -> m' (ControlNode) resourceApplyAdagradDA' op'options var gradient_accumulator gradient_squared_accumulator grad lr l1 l2 global_step | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs gradient_accumulator, buildInputs gradient_squared_accumulator, buildInputs grad, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs global_step] buildOp [] (opDef "ResourceApplyAdagradDA" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "gradient_accumulator" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "gradient_squared_accumulator" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "global_step" description: "Training step number. Must be a scalar." type: DT_INT64 } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention." } -} -- | Update '*var' according to the Adam algorithm. -- -- lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t) -- m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t -- v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t -- variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon) resourceApplyAdam :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 t m' . 
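-- A scalar sketch of the Adam recurrences quoted above (hedged: `t` is the
-- step number, so the op's `beta1_power`/`beta2_power` inputs correspond to
-- `beta1 ^ t` and `beta2 ^ t`; helper name is illustrative):
--
-- ```haskell
-- adamStep :: Double -> Double -> Double -> Double -> Int
--          -> (Double, Double, Double) -> Double -> (Double, Double, Double)
-- adamStep lr beta1 beta2 eps t (var, m, v) g =
--   let m'  = beta1 * m + (1 - beta1) * g
--       v'  = beta2 * v + (1 - beta2) * g * g
--       lrT = lr * sqrt (1 - beta2 ^ t) / (1 - beta1 ^ t)
--   in (var - lrT * m' / (sqrt v' + eps), m', v')
-- ```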
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __m__: Should be from a Variable(). -> Tensor v'3 ResourceHandle -- ^ __v__: Should be from a Variable(). -> Tensor v'4 t -- ^ __beta1_power__: Must be a scalar. -> Tensor v'5 t -- ^ __beta2_power__: Must be a scalar. -> Tensor v'6 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'7 t -- ^ __beta1__: Momentum factor. Must be a scalar. -> Tensor v'8 t -- ^ __beta2__: Momentum factor. Must be a scalar. -> Tensor v'9 t -- ^ __epsilon__: Ridge term. Must be a scalar. -> Tensor v'10 t -- ^ __grad__: The gradient. -> m' (ControlNode) resourceApplyAdam = resourceApplyAdam' id resourceApplyAdam' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __m__: Should be from a Variable(). -> Tensor v'3 ResourceHandle -- ^ __v__: Should be from a Variable(). -> Tensor v'4 t -- ^ __beta1_power__: Must be a scalar. -> Tensor v'5 t -- ^ __beta2_power__: Must be a scalar. -> Tensor v'6 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'7 t -- ^ __beta1__: Momentum factor. Must be a scalar. -> Tensor v'8 t -- ^ __beta2__: Momentum factor. Must be a scalar. -> Tensor v'9 t -- ^ __epsilon__: Ridge term. Must be a scalar. -> Tensor v'10 t -- ^ __grad__: The gradient. -> m' (ControlNode) resourceApplyAdam' op'options var m v beta1_power beta2_power lr beta1 beta2 epsilon grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs m, buildInputs v, buildInputs beta1_power, buildInputs beta2_power, buildInputs lr, buildInputs beta1, buildInputs beta2, buildInputs epsilon, buildInputs grad] buildOp [] (opDef "ResourceApplyAdam" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "m" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "v" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "beta1_power" description: "Must be a scalar." type_attr: "T" } input_arg { name: "beta2_power" description: "Must be a scalar." type_attr: "T" } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "beta1" description: "Momentum factor. Must be a scalar." type_attr: "T" } input_arg { name: "beta2" description: "Momentum factor. Must be a scalar." type_attr: "T" } input_arg { name: "epsilon" description: "Ridge term. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." 
type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var, m, and v tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } -} -- | Update '*var' according to the centered RMSProp algorithm. -- -- The centered RMSProp algorithm uses an estimate of the centered second moment -- (i.e., the variance) for normalization, as opposed to regular RMSProp, which -- uses the (uncentered) second moment. This often helps with training, but is -- slightly more expensive in terms of computation and memory. -- -- Note that in dense implementation of this algorithm, mg, ms, and mom will -- update even if the grad is zero, but in this sparse implementation, mg, ms, -- and mom will not update in iterations during which the grad is zero. -- -- mean_square = decay * mean_square + (1-decay) * gradient ** 2 -- mean_grad = decay * mean_grad + (1-decay) * gradient -- -- Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) -- -- mg <- rho * mg_{t-1} + (1-rho) * grad -- ms <- rho * ms_{t-1} + (1-rho) * grad * grad -- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) -- var <- var - mom resourceApplyCenteredRMSProp :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __mg__: Should be from a Variable(). -> Tensor v'3 ResourceHandle -- ^ __ms__: Should be from a Variable(). -> Tensor v'4 ResourceHandle -- ^ __mom__: Should be from a Variable(). -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'6 t -- ^ __rho__: Decay rate. Must be a scalar. -> Tensor v'7 t -- ^ __momentum__ -> Tensor v'8 t -- ^ __epsilon__: Ridge term. Must be a scalar. -> Tensor v'9 t -- ^ __grad__: The gradient. -> m' (ControlNode) resourceApplyCenteredRMSProp = resourceApplyCenteredRMSProp' id resourceApplyCenteredRMSProp' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __mg__: Should be from a Variable(). -> Tensor v'3 ResourceHandle -- ^ __ms__: Should be from a Variable(). -> Tensor v'4 ResourceHandle -- ^ __mom__: Should be from a Variable(). -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'6 t -- ^ __rho__: Decay rate. Must be a scalar. -> Tensor v'7 t -- ^ __momentum__ -> Tensor v'8 t -- ^ __epsilon__: Ridge term. Must be a scalar. -> Tensor v'9 t -- ^ __grad__: The gradient. 
-> m' (ControlNode) resourceApplyCenteredRMSProp' op'options var mg ms mom lr rho momentum epsilon grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs mg, buildInputs ms, buildInputs mom, buildInputs lr, buildInputs rho, buildInputs momentum, buildInputs epsilon, buildInputs grad] buildOp [] (opDef "ResourceApplyCenteredRMSProp" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "mg" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "ms" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "mom" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "rho" description: "Decay rate. Must be a scalar." type_attr: "T" } input_arg { name: "momentum" type_attr: "T" } input_arg { name: "epsilon" description: "Ridge term. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } -} -- | Update '*var' according to the Ftrl-proximal scheme. -- -- accum_new = accum + grad * grad -- linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var -- quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 -- var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 -- accum = accum_new resourceApplyFtrl :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 ResourceHandle -- ^ __linear__: Should be from a Variable(). -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'6 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'7 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'8 t -- ^ __lr_power__: Scaling factor. Must be a scalar. -> m' (ControlNode) resourceApplyFtrl = resourceApplyFtrl' id resourceApplyFtrl' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 ResourceHandle -- ^ __linear__: Should be from a Variable(). -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar.
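-- A scalar sketch of the Ftrl-proximal update quoted above (helper name is
-- illustrative):
--
-- ```haskell
-- ftrlStep :: Double -> Double -> Double -> Double
--          -> (Double, Double, Double) -> Double -> (Double, Double, Double)
-- ftrlStep lr l1 l2 lrPower (var, accum, linear) g =
--   let accum'    = accum + g * g
--       linear'   = linear + g + (accum' ** (-lrPower) - accum ** (-lrPower)) / lr * var
--       quadratic = 1 / (accum' ** lrPower * lr) + 2 * l2
--       var' | abs linear' > l1 = (signum linear' * l1 - linear') / quadratic
--            | otherwise        = 0
--   in (var', accum', linear')
-- ```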
-> Tensor v'6 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'7 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'8 t -- ^ __lr_power__: Scaling factor. Must be a scalar. -> m' (ControlNode) resourceApplyFtrl' op'options var accum linear grad lr l1 l2 lr_power | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs linear, buildInputs grad, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs lr_power] buildOp [] (opDef "ResourceApplyFtrl" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "accum" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "linear" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "lr_power" description: "Scaling factor. Must be a scalar." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } -} -- | Update '*var' by subtracting 'alpha' * 'delta' from it. resourceApplyGradientDescent :: forall v'1 v'2 v'3 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar. -> Tensor v'3 t -- ^ __delta__: The change. -> m' (ControlNode) resourceApplyGradientDescent = resourceApplyGradientDescent' id resourceApplyGradientDescent' :: forall v'1 v'2 v'3 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar. -> Tensor v'3 t -- ^ __delta__: The change. -> m' (ControlNode) resourceApplyGradientDescent' op'options var alpha delta | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs alpha, buildInputs delta] buildOp [] (opDef "ResourceApplyGradientDescent" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "alpha" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "delta" description: "The change."
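-- Hedged usage sketch: these apply-ops return a `ControlNode` that a caller
-- runs (or depends on) to take one optimisation step. `scalar` is an assumed
-- constant builder such as the one in TensorFlow.Ops, and `varHandle` and
-- `grad` are assumed to be in scope:
--
-- ```haskell
-- -- one SGD step: var <- var - 0.01 * grad
-- step <- resourceApplyGradientDescent varHandle (scalar 0.01) grad
-- ```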
type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention." } -} -- | Update '*var' according to the momentum scheme. Set use_nesterov = True if you -- -- want to use Nesterov momentum. -- -- accum = accum * momentum + grad -- var -= lr * accum resourceApplyMomentum :: forall v'1 v'2 v'3 v'4 v'5 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 t -- ^ __momentum__: Momentum. Must be a scalar. -> m' (ControlNode) resourceApplyMomentum = resourceApplyMomentum' id resourceApplyMomentum' :: forall v'1 v'2 v'3 v'4 v'5 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 t -- ^ __momentum__: Momentum. Must be a scalar. -> m' (ControlNode) resourceApplyMomentum' op'options var accum lr grad momentum | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs lr, buildInputs grad, buildInputs momentum] buildOp [] (opDef "ResourceApplyMomentum" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "accum" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "momentum" description: "Momentum. Must be a scalar." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } attr { name: "use_nesterov" type: "bool" default_value { b: false } description: "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum." 
} -} -- | Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. -- -- accum += grad * grad -- prox_v = var - lr * grad * (1 / sqrt(accum)) -- var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} resourceApplyProximalAdagrad :: forall v'1 v'2 v'3 v'4 v'5 v'6 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'4 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'5 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'6 t -- ^ __grad__: The gradient. -> m' (ControlNode) resourceApplyProximalAdagrad = resourceApplyProximalAdagrad' id resourceApplyProximalAdagrad' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'4 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'5 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'6 t -- ^ __grad__: The gradient. -> m' (ControlNode) resourceApplyProximalAdagrad' op'options var accum lr l1 l2 grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs grad] buildOp [] (opDef "ResourceApplyProximalAdagrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "accum" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention." } -} -- | Update '*var' as FOBOS algorithm with fixed learning rate. -- -- prox_v = var - alpha * delta -- var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0} resourceApplyProximalGradientDescent :: forall v'1 v'2 v'3 v'4 v'5 t m' . 
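-- A scalar sketch of the FOBOS step quoted above; the second line is a
-- soft-threshold shrinkage (helper name is illustrative):
--
-- ```haskell
-- proximalStep :: Double -> Double -> Double -> Double -> Double -> Double
-- proximalStep alpha l1 l2 var delta =
--   let proxV = var - alpha * delta
--   in signum proxV / (1 + alpha * l2) * max (abs proxV - alpha * l1) 0
-- ```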
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar. -> Tensor v'3 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'4 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'5 t -- ^ __delta__: The change. -> m' (ControlNode) resourceApplyProximalGradientDescent = resourceApplyProximalGradientDescent' id resourceApplyProximalGradientDescent' :: forall v'1 v'2 v'3 v'4 v'5 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar. -> Tensor v'3 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'4 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'5 t -- ^ __delta__: The change. -> m' (ControlNode) resourceApplyProximalGradientDescent' op'options var alpha l1 l2 delta | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs alpha, buildInputs l1, buildInputs l2, buildInputs delta] buildOp [] (opDef "ResourceApplyProximalGradientDescent" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "alpha" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "delta" description: "The change." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention." } -} -- | Update '*var' according to the RMSProp algorithm. -- -- Note that in dense implementation of this algorithm, ms and mom will -- update even if the grad is zero, but in this sparse implementation, ms -- and mom will not update in iterations during which the grad is zero. -- -- mean_square = decay * mean_square + (1-decay) * gradient ** 2 -- Delta = learning_rate * gradient / sqrt(mean_square + epsilon) -- -- ms <- rho * ms_{t-1} + (1-rho) * grad * grad -- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) -- var <- var - mom resourceApplyRMSProp :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __ms__: Should be from a Variable(). 
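-- A scalar sketch of the (uncentered) RMSProp recurrences quoted above
-- (helper name is illustrative):
--
-- ```haskell
-- rmsPropStep :: Double -> Double -> Double -> Double
--             -> (Double, Double, Double) -> Double -> (Double, Double, Double)
-- rmsPropStep lr rho momentum eps (var, ms, mom) g =
--   let ms'  = rho * ms + (1 - rho) * g * g
--       mom' = momentum * mom + lr * g / sqrt (ms' + eps)
--   in (var - mom', ms', mom')
-- ```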
-> Tensor v'3 ResourceHandle -- ^ __mom__: Should be from a Variable(). -> Tensor v'4 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'5 t -- ^ __rho__: Decay rate. Must be a scalar. -> Tensor v'6 t -- ^ __momentum__ -> Tensor v'7 t -- ^ __epsilon__: Ridge term. Must be a scalar. -> Tensor v'8 t -- ^ __grad__: The gradient. -> m' (ControlNode) resourceApplyRMSProp = resourceApplyRMSProp' id resourceApplyRMSProp' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __ms__: Should be from a Variable(). -> Tensor v'3 ResourceHandle -- ^ __mom__: Should be from a Variable(). -> Tensor v'4 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'5 t -- ^ __rho__: Decay rate. Must be a scalar. -> Tensor v'6 t -- ^ __momentum__ -> Tensor v'7 t -- ^ __epsilon__: Ridge term. Must be a scalar. -> Tensor v'8 t -- ^ __grad__: The gradient. -> m' (ControlNode) resourceApplyRMSProp' op'options var ms mom lr rho momentum epsilon grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs ms, buildInputs mom, buildInputs lr, buildInputs rho, buildInputs momentum, buildInputs epsilon, buildInputs grad] buildOp [] (opDef "ResourceApplyRMSProp" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "ms" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "mom" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "rho" description: "Decay rate. Must be a scalar." type_attr: "T" } input_arg { name: "momentum" type_attr: "T" } input_arg { name: "epsilon" description: "Ridge term. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } -} -- | Gather slices from the variable pointed to by `resource` according to `indices`. -- -- `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). -- Produces an output tensor with shape `indices.shape + params.shape[1:]` where: -- -- ```python -- # Scalar indices -- output[:, ..., :] = params[indices, :, ... :] -- -- # Vector indices -- output[i, :, ..., :] = params[indices[i], :, ... :] -- -- # Higher rank indices -- output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] -- ``` resourceGather :: forall v'1 v'2 dtype tindices m' . 
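-- The shape rule quoted above as a small worked example (hedged; helper name
-- is illustrative): with `params.shape = [5, 3]` and `indices.shape = [2]`,
-- the output shape is `indices.shape + params.shape[1:] = [2, 3]`, i.e. one
-- gathered row per index.
--
-- ```haskell
-- gatherShape :: [Int] -> [Int] -> [Int]
-- gatherShape indicesShape paramsShape = indicesShape ++ drop 1 paramsShape
-- -- gatherShape [2] [5, 3] == [2, 3]
-- ```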
(MonadBuild m', TensorType dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 tindices -- ^ __indices__ -> m' (Tensor Value dtype) -- ^ __output__ resourceGather = resourceGather' id resourceGather' :: forall v'1 v'2 dtype tindices m' . (MonadBuild m', TensorType dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 tindices -- ^ __indices__ -> m' (Tensor Value dtype) -- ^ __output__ resourceGather' op'options resource indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs indices] buildOp [] (opDef "ResourceGather" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } input_arg { name: "indices" type_attr: "Tindices" } output_arg { name: "output" type_attr: "dtype" } attr { name: "validate_indices" type: "bool" default_value { b: true } } attr { name: "dtype" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Adds sparse updates to the variable referenced by `resource`. -- -- This operation computes -- -- # Scalar indices -- ref[indices, ...] += updates[...] -- -- # Vector indices (for each i) -- ref[indices[i], ...] += updates[i, ...] -- -- # High rank indices (for each i, ..., j) -- ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] -- -- Duplicate entries are handled correctly: if multiple `indices` reference -- the same location, their contributions add. -- -- Requires `updates.shape = indices.shape + ref.shape[1:]`. -- --
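-- A pure model of the duplicate-index rule quoted above (hedged sketch with
-- a one-dimensional `ref`; helper name is illustrative):
--
-- ```haskell
-- scatterAddModel :: [Double] -> [(Int, Double)] -> [Double]
-- scatterAddModel ref updates =
--   [ v + sum [ u | (i, u) <- updates, i == k ] | (k, v) <- zip [0 ..] ref ]
-- -- scatterAddModel [0, 0] [(0, 1), (0, 2), (1, 5)] == [3.0, 5.0]
-- ```
--
-- Contributions at repeated indices add, matching the documented behaviour.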
resourceScatterAdd :: forall v'1 v'2 v'3 dtype tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __resource__: Should be from a `Variable` node. -> Tensor v'2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`. -> Tensor v'3 dtype -- ^ __updates__: A tensor of updated values to add to `ref`. -> m' (ControlNode) resourceScatterAdd = resourceScatterAdd' id resourceScatterAdd' :: forall v'1 v'2 v'3 dtype tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__: Should be from a `Variable` node. -> Tensor v'2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`. -> Tensor v'3 dtype -- ^ __updates__: A tensor of updated values to add to `ref`. -> m' (ControlNode) resourceScatterAdd' op'options resource indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs indices, buildInputs updates] buildOp [] (opDef "ResourceScatterAdd" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" description: "Should be from a `Variable` node." type: DT_RESOURCE } input_arg { name: "indices" description: "A tensor of indices into the first dimension of `ref`." type_attr: "Tindices" } input_arg { name: "updates" description: "A tensor of updated values to add to `ref`." type_attr: "dtype" } attr { name: "dtype" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | var: Should be from a Variable(). resourceSparseApplyAdadelta :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 ResourceHandle -- ^ __accum_update__: : Should be from a Variable(). -> Tensor v'4 t -- ^ __lr__: Learning rate. Must be a scalar. -> Tensor v'5 t -- ^ __rho__: Decay factor. Must be a scalar. -> Tensor v'6 t -- ^ __epsilon__: Constant factor. Must be a scalar. -> Tensor v'7 t -- ^ __grad__: The gradient. -> Tensor v'8 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> m' (ControlNode) resourceSparseApplyAdadelta = resourceSparseApplyAdadelta' id resourceSparseApplyAdadelta' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 t tindices m' . 
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 ResourceHandle -- ^ __accum_update__: : Should be from a Variable(). -> Tensor v'4 t -- ^ __lr__: Learning rate. Must be a scalar. -> Tensor v'5 t -- ^ __rho__: Decay factor. Must be a scalar. -> Tensor v'6 t -- ^ __epsilon__: Constant factor. Must be a scalar. -> Tensor v'7 t -- ^ __grad__: The gradient. -> Tensor v'8 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> m' (ControlNode) resourceSparseApplyAdadelta' op'options var accum accum_update lr rho epsilon grad indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs accum_update, buildInputs lr, buildInputs rho, buildInputs epsilon, buildInputs grad, buildInputs indices] buildOp [] (opDef "ResourceSparseApplyAdadelta" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "accum" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "accum_update" description: ": Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "lr" description: "Learning rate. Must be a scalar." type_attr: "T" } input_arg { name: "rho" description: "Decay factor. Must be a scalar." type_attr: "T" } input_arg { name: "epsilon" description: "Constant factor. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention." } -} -- | Update relevant entries in '*var' and '*accum' according to the adagrad scheme. -- -- That is for rows we have grad for, we update var and accum as follows: -- accum += grad * grad -- var -= lr * grad * (1 / sqrt(accum)) resourceSparseApplyAdagrad :: forall v'1 v'2 v'3 v'4 v'5 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 t -- ^ __lr__: Learning rate. Must be a scalar. -> Tensor v'4 t -- ^ __grad__: The gradient. 
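-- A sketch of the sparse-Adagrad contract quoted above (hedged; only the
-- rows named in `indices` are touched, and helper names are illustrative):
--
-- ```haskell
-- sparseAdagradRows :: Double -> [(Int, Double)] -> [(Double, Double)]
--                   -> [(Double, Double)]
-- sparseAdagradRows lr rowGrads vars =
--   [ case lookup k rowGrads of
--       Nothing -> (var, accum)                      -- row has no gradient
--       Just g  -> let accum' = accum + g * g
--                  in (var - lr * g / sqrt accum', accum')
--   | (k, (var, accum)) <- zip [0 ..] vars ]
-- ```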
-> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> m' (ControlNode) resourceSparseApplyAdagrad = resourceSparseApplyAdagrad' id resourceSparseApplyAdagrad' :: forall v'1 v'2 v'3 v'4 v'5 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 t -- ^ __lr__: Learning rate. Must be a scalar. -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> m' (ControlNode) resourceSparseApplyAdagrad' op'options var accum lr grad indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs lr, buildInputs grad, buildInputs indices] buildOp [] (opDef "ResourceSparseApplyAdagrad" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "accum" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "lr" description: "Learning rate. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } -} -- | Update entries in '*var' and '*accum' according to the proximal adagrad scheme. resourceSparseApplyAdagradDA :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __gradient_accumulator__: Should be from a Variable(). -> Tensor v'3 ResourceHandle -- ^ __gradient_squared_accumulator__: Should be from a Variable(). -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> Tensor v'6 t -- ^ __lr__: Learning rate. Must be a scalar. -> Tensor v'7 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'8 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'9 Data.Int.Int64 -- ^ __global_step__: Training step number. Must be a scalar. 
-> m' (ControlNode) resourceSparseApplyAdagradDA = resourceSparseApplyAdagradDA' id resourceSparseApplyAdagradDA' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __gradient_accumulator__: Should be from a Variable(). -> Tensor v'3 ResourceHandle -- ^ __gradient_squared_accumulator__: Should be from a Variable(). -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> Tensor v'6 t -- ^ __lr__: Learning rate. Must be a scalar. -> Tensor v'7 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'8 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'9 Data.Int.Int64 -- ^ __global_step__: Training step number. Must be a scalar. -> m' (ControlNode) resourceSparseApplyAdagradDA' op'options var gradient_accumulator gradient_squared_accumulator grad indices lr l1 l2 global_step | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs gradient_accumulator, buildInputs gradient_squared_accumulator, buildInputs grad, buildInputs indices, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs global_step] buildOp [] (opDef "ResourceSparseApplyAdagradDA" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "gradient_accumulator" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "gradient_squared_accumulator" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } input_arg { name: "lr" description: "Learning rate. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "global_step" description: "Training step number. Must be a scalar." type: DT_INT64 } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention." } -} -- | Update '*var' according to the centered RMSProp algorithm. -- -- The centered RMSProp algorithm uses an estimate of the centered second moment -- (i.e., the variance) for normalization, as opposed to regular RMSProp, which -- uses the (uncentered) second moment. 
This often helps with training, but is -- slightly more expensive in terms of computation and memory. -- -- Note that in dense implementation of this algorithm, mg, ms, and mom will -- update even if the grad is zero, but in this sparse implementation, mg, ms, -- and mom will not update in iterations during which the grad is zero. -- -- mean_square = decay * mean_square + (1-decay) * gradient ** 2 -- mean_grad = decay * mean_grad + (1-decay) * gradient -- Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) -- -- ms <- rho * ms_{t-1} + (1-rho) * grad * grad -- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) -- var <- var - mom resourceSparseApplyCenteredRMSProp :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __mg__: Should be from a Variable(). -> Tensor v'3 ResourceHandle -- ^ __ms__: Should be from a Variable(). -> Tensor v'4 ResourceHandle -- ^ __mom__: Should be from a Variable(). -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'6 t -- ^ __rho__: Decay rate. Must be a scalar. -> Tensor v'7 t -- ^ __momentum__ -> Tensor v'8 t -- ^ __epsilon__: Ridge term. Must be a scalar. -> Tensor v'9 t -- ^ __grad__: The gradient. -> Tensor v'10 tindices -- ^ __indices__: A vector of indices into the first dimension of var, ms and mom. -> m' (ControlNode) resourceSparseApplyCenteredRMSProp = resourceSparseApplyCenteredRMSProp' id resourceSparseApplyCenteredRMSProp' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __mg__: Should be from a Variable(). -> Tensor v'3 ResourceHandle -- ^ __ms__: Should be from a Variable(). -> Tensor v'4 ResourceHandle -- ^ __mom__: Should be from a Variable(). -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'6 t -- ^ __rho__: Decay rate. Must be a scalar. -> Tensor v'7 t -- ^ __momentum__ -> Tensor v'8 t -- ^ __epsilon__: Ridge term. Must be a scalar. -> Tensor v'9 t -- ^ __grad__: The gradient. -> Tensor v'10 tindices -- ^ __indices__: A vector of indices into the first dimension of var, ms and mom. -> m' (ControlNode) resourceSparseApplyCenteredRMSProp' op'options var mg ms mom lr rho momentum epsilon grad indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs mg, buildInputs ms, buildInputs mom, buildInputs lr, buildInputs rho, buildInputs momentum, buildInputs epsilon, buildInputs grad, buildInputs indices] buildOp [] (opDef "ResourceSparseApplyCenteredRMSProp" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "mg" description: "Should be from a Variable()." 
type: DT_RESOURCE } input_arg { name: "ms" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "mom" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "rho" description: "Decay rate. Must be a scalar." type_attr: "T" } input_arg { name: "momentum" type_attr: "T" } input_arg { name: "epsilon" description: "Ridge term. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var, ms and mom." type_attr: "Tindices" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } -} -- | Update relevant entries in '*var' according to the Ftrl-proximal scheme. -- -- That is for rows we have grad for, we update var, accum and linear as follows: -- accum_new = accum + grad * grad -- linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var -- quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 -- var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 -- accum = accum_new resourceSparseApplyFtrl :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 ResourceHandle -- ^ __linear__: Should be from a Variable(). -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> Tensor v'6 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'7 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'8 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'9 t -- ^ __lr_power__: Scaling factor. Must be a scalar. -> m' (ControlNode) resourceSparseApplyFtrl = resourceSparseApplyFtrl' id resourceSparseApplyFtrl' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 ResourceHandle -- ^ __linear__: Should be from a Variable(). -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. 
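{- A pure per-row sketch of the Ftrl-proximal update spelled out above (all
   scalars are caller-chosen placeholders; `**` is floating-point power):

     ftrlRow lr l1 l2 lrPower (var, accum, linear) g =
       let accum'  = accum + g * g
           linear' = linear + g
                     + (accum' ** (-lrPower) - accum ** (-lrPower)) / lr * var
           quad    = 1 / (accum' ** lrPower * lr) + 2 * l2
           var'    = if abs linear' > l1
                       then (signum linear' * l1 - linear') / quad
                       else 0
       in (var', accum', linear')
-}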
-> Tensor v'6 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'7 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'8 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'9 t -- ^ __lr_power__: Scaling factor. Must be a scalar. -> m' (ControlNode) resourceSparseApplyFtrl' op'options var accum linear grad indices lr l1 l2 lr_power | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs linear, buildInputs grad, buildInputs indices, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs lr_power] buildOp [] (opDef "ResourceSparseApplyFtrl" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "accum" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "linear" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "lr_power" description: "Scaling factor. Must be a scalar." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } -} -- | Update relevant entries in '*var' and '*accum' according to the momentum scheme. -- -- Set use_nesterov = True if you want to use Nesterov momentum. -- -- That is for rows we have grad for, we update var and accum as follows: -- -- accum = accum * momentum + grad -- var -= lr * accum resourceSparseApplyMomentum :: forall v'1 v'2 v'3 v'4 v'5 v'6 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 t -- ^ __lr__: Learning rate. Must be a scalar. -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> Tensor v'6 t -- ^ __momentum__: Momentum. Must be a scalar. -> m' (ControlNode) resourceSparseApplyMomentum = resourceSparseApplyMomentum' id resourceSparseApplyMomentum' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t tindices m' . 
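{- Optional attrs such as use_nesterov are set through the OpParams argument of
   the primed variant; a sketch (the tensor arguments are placeholders built
   elsewhere in the graph):

     nesterovStep var accum lr grad indices momentum =
       resourceSparseApplyMomentum' (opAttr "use_nesterov" .~ True)
                                    var accum lr grad indices momentum
-}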
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 t -- ^ __lr__: Learning rate. Must be a scalar. -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> Tensor v'6 t -- ^ __momentum__: Momentum. Must be a scalar. -> m' (ControlNode) resourceSparseApplyMomentum' op'options var accum lr grad indices momentum | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs lr, buildInputs grad, buildInputs indices, buildInputs momentum] buildOp [] (opDef "ResourceSparseApplyMomentum" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "accum" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "lr" description: "Learning rate. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } input_arg { name: "momentum" description: "Momentum. Must be a scalar." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } attr { name: "use_nesterov" type: "bool" default_value { b: false } description: "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum." } -} -- | Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. -- -- That is for rows we have grad for, we update var and accum as follows: -- accum += grad * grad -- prox_v = var -- prox_v -= lr * grad * (1 / sqrt(accum)) -- var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} resourceSparseApplyProximalAdagrad :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 t -- ^ __lr__: Learning rate. Must be a scalar. -> Tensor v'4 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'5 t -- ^ __l2__: L2 regularization. Must be a scalar. 
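{- The FOBOS update above is an Adagrad step followed by an l1 soft-threshold
   and an l2 rescale; per row, as a pure sketch:

     proximalAdagradRow lr l1 l2 (var, accum) g =
       let accum' = accum + g * g
           proxV  = var - lr * g / sqrt accum'
           var'   = signum proxV / (1 + lr * l2) * max (abs proxV - lr * l1) 0
       in (var', accum')
-}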
-> Tensor v'6 t -- ^ __grad__: The gradient. -> Tensor v'7 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> m' (ControlNode) resourceSparseApplyProximalAdagrad = resourceSparseApplyProximalAdagrad' id resourceSparseApplyProximalAdagrad' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 t -- ^ __lr__: Learning rate. Must be a scalar. -> Tensor v'4 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'5 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'6 t -- ^ __grad__: The gradient. -> Tensor v'7 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> m' (ControlNode) resourceSparseApplyProximalAdagrad' op'options var accum lr l1 l2 grad indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs grad, buildInputs indices] buildOp [] (opDef "ResourceSparseApplyProximalAdagrad" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "accum" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "lr" description: "Learning rate. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention." } -} -- | Sparse update '*var' as FOBOS algorithm with fixed learning rate. -- -- That is for rows we have grad for, we update var as follows: -- prox_v = var - alpha * grad -- var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0} resourceSparseApplyProximalGradientDescent :: forall v'1 v'2 v'3 v'4 v'5 v'6 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 t -- ^ __alpha__: Scaling factor. 
Must be a scalar. -> Tensor v'3 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'4 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'5 t -- ^ __grad__: The gradient. -> Tensor v'6 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> m' (ControlNode) resourceSparseApplyProximalGradientDescent = resourceSparseApplyProximalGradientDescent' id resourceSparseApplyProximalGradientDescent' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar. -> Tensor v'3 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'4 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'5 t -- ^ __grad__: The gradient. -> Tensor v'6 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> m' (ControlNode) resourceSparseApplyProximalGradientDescent' op'options var alpha l1 l2 grad indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs alpha, buildInputs l1, buildInputs l2, buildInputs grad, buildInputs indices] buildOp [] (opDef "ResourceSparseApplyProximalGradientDescent" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "alpha" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention." } -} -- | Update '*var' according to the RMSProp algorithm. -- -- Note that in dense implementation of this algorithm, ms and mom will -- update even if the grad is zero, but in this sparse implementation, ms -- and mom will not update in iterations during which the grad is zero. -- -- mean_square = decay * mean_square + (1-decay) * gradient ** 2 -- Delta = learning_rate * gradient / sqrt(mean_square + epsilon) -- -- ms <- rho * ms_{t-1} + (1-rho) * grad * grad -- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) -- var <- var - mom resourceSparseApplyRMSProp :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t tindices m' . 
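{- The corresponding per-row arithmetic for the (uncentered) RMSProp step
   above, as a pure sketch with caller-chosen scalars:

     rmsPropRow lr rho momentum eps (var, ms, mom) g =
       let ms'  = rho * ms + (1 - rho) * g * g
           mom' = momentum * mom + lr * g / sqrt (ms' + eps)
       in (var - mom', ms', mom')
-}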
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __ms__: Should be from a Variable(). -> Tensor v'3 ResourceHandle -- ^ __mom__: Should be from a Variable(). -> Tensor v'4 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'5 t -- ^ __rho__: Decay rate. Must be a scalar. -> Tensor v'6 t -- ^ __momentum__ -> Tensor v'7 t -- ^ __epsilon__: Ridge term. Must be a scalar. -> Tensor v'8 t -- ^ __grad__: The gradient. -> Tensor v'9 tindices -- ^ __indices__: A vector of indices into the first dimension of var, ms and mom. -> m' (ControlNode) resourceSparseApplyRMSProp = resourceSparseApplyRMSProp' id resourceSparseApplyRMSProp' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__: Should be from a Variable(). -> Tensor v'2 ResourceHandle -- ^ __ms__: Should be from a Variable(). -> Tensor v'3 ResourceHandle -- ^ __mom__: Should be from a Variable(). -> Tensor v'4 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'5 t -- ^ __rho__: Decay rate. Must be a scalar. -> Tensor v'6 t -- ^ __momentum__ -> Tensor v'7 t -- ^ __epsilon__: Ridge term. Must be a scalar. -> Tensor v'8 t -- ^ __grad__: The gradient. -> Tensor v'9 tindices -- ^ __indices__: A vector of indices into the first dimension of var, ms and mom. -> m' (ControlNode) resourceSparseApplyRMSProp' op'options var ms mom lr rho momentum epsilon grad indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs ms, buildInputs mom, buildInputs lr, buildInputs rho, buildInputs momentum, buildInputs epsilon, buildInputs grad, buildInputs indices] buildOp [] (opDef "ResourceSparseApplyRMSProp" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "ms" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "mom" description: "Should be from a Variable()." type: DT_RESOURCE } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "rho" description: "Decay rate. Must be a scalar." type_attr: "T" } input_arg { name: "momentum" type_attr: "T" } input_arg { name: "epsilon" description: "Ridge term. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var, ms and mom." 
type_attr: "Tindices" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } -} -- | Restores a tensor from checkpoint files. -- -- Reads a tensor stored in one or several files. If there are several files (for -- instance because a tensor was saved as slices), `file_pattern` may contain -- wildcard symbols (`*` and `?`) in the filename portion only, not in the -- directory portion. -- -- If a `file_pattern` matches several files, `preferred_shard` can be used to hint -- in which file the requested tensor is likely to be found. This op will first -- open the file at index `preferred_shard` in the list of matching files and try -- to restore tensors from that file. Only if some tensors or tensor slices are -- not found in that first file, then the Op opens all the files. Setting -- `preferred_shard` to match the value passed as the `shard` input -- of a matching `Save` Op may speed up Restore. This attribute only affects -- performance, not correctness. The default value -1 means files are processed in -- order. -- -- See also `RestoreSlice`. restore :: forall v'1 v'2 dt . (TensorType dt) => Tensor v'1 Data.ByteString.ByteString -- ^ __file_pattern__: Must have a single element. The pattern of the files from -- which we read the tensor. -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_name__: Must have a single element. The name of the tensor to be -- restored. -> Tensor Build dt -- ^ __tensor__: The restored tensor. restore = restore' id restore' :: forall v'1 v'2 dt . (TensorType dt) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __file_pattern__: Must have a single element. The pattern of the files from -- which we read the tensor. -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_name__: Must have a single element. The name of the tensor to be -- restored. -> Tensor Build dt -- ^ __tensor__: The restored tensor. restore' op'options file_pattern tensor_name | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs file_pattern, buildInputs tensor_name] return (opDef "Restore" & opAttr "dt" .~ tensorType (undefined :: dt) & op'options & opInputs .~ op'inputs) {- input_arg { name: "file_pattern" description: "Must have a single element. The pattern of the files from\nwhich we read the tensor." type: DT_STRING } input_arg { name: "tensor_name" description: "Must have a single element. The name of the tensor to be\nrestored." type: DT_STRING } output_arg { name: "tensor" description: "The restored tensor." type_attr: "dt" } attr { name: "dt" type: "type" description: "The type of the tensor to be restored." } attr { name: "preferred_shard" type: "int" default_value { i: -1 } description: "Index of file to open first if multiple files match\n`file_pattern`." } -} -- | Restores a tensor from checkpoint files. -- -- This is like `Restore` except that restored tensor can be listed as filling -- only a slice of a larger tensor. 
`shape_and_slice` specifies the shape of the -- larger tensor and the slice that the restored tensor covers. -- -- The `shape_and_slice` input has the same format as the -- elements of the `shapes_and_slices` input of the `SaveSlices` op. restoreSlice :: forall v'1 v'2 v'3 dt . (TensorType dt) => Tensor v'1 Data.ByteString.ByteString -- ^ __file_pattern__: Must have a single element. The pattern of the files from -- which we read the tensor. -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_name__: Must have a single element. The name of the tensor to be -- restored. -> Tensor v'3 Data.ByteString.ByteString -- ^ __shape_and_slice__: Scalar. The shapes and slice specifications to use when -- restoring a tensor. -> Tensor Build dt -- ^ __tensor__: The restored tensor. restoreSlice = restoreSlice' id restoreSlice' :: forall v'1 v'2 v'3 dt . (TensorType dt) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __file_pattern__: Must have a single element. The pattern of the files from -- which we read the tensor. -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_name__: Must have a single element. The name of the tensor to be -- restored. -> Tensor v'3 Data.ByteString.ByteString -- ^ __shape_and_slice__: Scalar. The shapes and slice specifications to use when -- restoring a tensor. -> Tensor Build dt -- ^ __tensor__: The restored tensor. restoreSlice' op'options file_pattern tensor_name shape_and_slice | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs file_pattern, buildInputs tensor_name, buildInputs shape_and_slice] return (opDef "RestoreSlice" & opAttr "dt" .~ tensorType (undefined :: dt) & op'options & opInputs .~ op'inputs) {- input_arg { name: "file_pattern" description: "Must have a single element. The pattern of the files from\nwhich we read the tensor." type: DT_STRING } input_arg { name: "tensor_name" description: "Must have a single element. The name of the tensor to be\nrestored." type: DT_STRING } input_arg { name: "shape_and_slice" description: "Scalar. The shapes and slice specifications to use when\nrestoring a tensor." type: DT_STRING } output_arg { name: "tensor" description: "The restored tensor." type_attr: "dt" } attr { name: "dt" type: "type" description: "The type of the tensor to be restored." } attr { name: "preferred_shard" type: "int" default_value { i: -1 } description: "Index of file to open first if multiple files match\n`file_pattern`. See the documentation for `Restore`." } -} -- | Restores tensors from a V2 checkpoint. -- -- For backward compatibility with the V1 format, this Op currently allows -- restoring from a V1 checkpoint as well: -- - This Op first attempts to find the V2 index file pointed to by "prefix", and -- if found, proceeds to read it as a V2 checkpoint; -- - Otherwise the V1 read path is invoked. -- Relying on this behavior is not recommended, as the ability to fall back to read -- V1 might be deprecated and eventually removed. -- -- By default, restores the named tensors in full. If the caller wishes to restore -- specific slices of stored tensors, "shape_and_slices" should be non-empty -- strings and correspondingly well-formed. -- -- Callers must ensure all the named tensors are indeed stored in the checkpoint. restoreV2 :: forall v'1 v'2 v'3 dtypes . (TensorTypes dtypes) => Tensor v'1 Data.ByteString.ByteString -- ^ __prefix__: Must have a single element. The prefix of a V2 checkpoint. -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_names__: shape {N}.
The names of the tensors to be restored. -> Tensor v'3 Data.ByteString.ByteString -- ^ __shape_and_slices__: shape {N}. The slice specs of the tensors to be restored. -- Empty strings indicate that they are non-partitioned tensors. -> TensorList (Build) dtypes -- ^ __tensors__: shape {N}. The restored tensors, whose shapes are read from the -- checkpoint directly. restoreV2 = restoreV2' id restoreV2' :: forall v'1 v'2 v'3 dtypes . (TensorTypes dtypes) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __prefix__: Must have a single element. The prefix of a V2 checkpoint. -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_names__: shape {N}. The names of the tensors to be restored. -> Tensor v'3 Data.ByteString.ByteString -- ^ __shape_and_slices__: shape {N}. The slice specs of the tensors to be restored. -- Empty strings indicate that they are non-partitioned tensors. -> TensorList (Build) dtypes -- ^ __tensors__: shape {N}. The restored tensors, whose shapes are read from the -- checkpoint directly. restoreV2' op'options prefix tensor_names shape_and_slices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs prefix, buildInputs tensor_names, buildInputs shape_and_slices] return (opDef "RestoreV2" & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes) & op'options & opInputs .~ op'inputs) {- input_arg { name: "prefix" description: "Must have a single element. The prefix of a V2 checkpoint." type: DT_STRING } input_arg { name: "tensor_names" description: "shape {N}. The names of the tensors to be restored." type: DT_STRING } input_arg { name: "shape_and_slices" description: "shape {N}. The slice specs of the tensors to be restored.\nEmpty strings indicate that they are non-partitioned tensors." type: DT_STRING } output_arg { name: "tensors" description: "shape {N}. The restored tensors, whose shapes are read from the\ncheckpoint directly." type_list_attr: "dtypes" } attr { name: "dtypes" type: "list(type)" description: "shape {N}. The list of expected dtype for the tensors. Must match\nthose stored in the checkpoint." has_minimum: true minimum: 1 } -} -- | Reverses specific dimensions of a tensor. -- -- Given a `tensor`, and a `bool` tensor `dims` representing the dimensions -- of `tensor`, this operation reverses each dimension i of `tensor` where -- `dims[i]` is `True`. -- -- `tensor` can have up to 8 dimensions. The number of dimensions -- of `tensor` must equal the number of elements in `dims`. In other words: -- -- `rank(tensor) = size(dims)` -- -- For example: -- -- ```prettyprint -- # tensor 't' is [[[[ 0, 1, 2, 3], -- # [ 4, 5, 6, 7], -- # [ 8, 9, 10, 11]], -- # [[12, 13, 14, 15], -- # [16, 17, 18, 19], -- # [20, 21, 22, 23]]]] -- # tensor 't' shape is [1, 2, 3, 4] -- -- # 'dims' is [False, False, False, True] -- reverse(t, dims) ==> [[[[ 3, 2, 1, 0], -- [ 7, 6, 5, 4], -- [ 11, 10, 9, 8]], -- [[15, 14, 13, 12], -- [19, 18, 17, 16], -- [23, 22, 21, 20]]]] -- -- # 'dims' is [False, True, False, False] -- reverse(t, dims) ==> [[[[12, 13, 14, 15], -- [16, 17, 18, 19], -- [20, 21, 22, 23] -- [[ 0, 1, 2, 3], -- [ 4, 5, 6, 7], -- [ 8, 9, 10, 11]]]] -- -- # 'dims' is [False, False, True, False] -- reverse(t, dims) ==> [[[[8, 9, 10, 11], -- [4, 5, 6, 7], -- [0, 1, 2, 3]] -- [[20, 21, 22, 23], -- [16, 17, 18, 19], -- [12, 13, 14, 15]]]] -- ``` reverse :: forall v'1 v'2 t . 
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __tensor__: Up to 8-D. -> Tensor v'2 Bool -- ^ __dims__: 1-D. The dimensions to reverse. -> Tensor Build t -- ^ __output__: The same shape as `tensor`. reverse = reverse' id reverse' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __tensor__: Up to 8-D. -> Tensor v'2 Bool -- ^ __dims__: 1-D. The dimensions to reverse. -> Tensor Build t -- ^ __output__: The same shape as `tensor`. reverse' op'options tensor dims | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor, buildInputs dims] return (opDef "Reverse" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" description: "Up to 8-D." type_attr: "T" } input_arg { name: "dims" description: "1-D. The dimensions to reverse." type: DT_BOOL } output_arg { name: "output" description: "The same shape as `tensor`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_INT32 type: DT_INT64 type: DT_BOOL type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Reverses variable length slices. -- -- This op first slices `input` along the dimension `batch_dim`, and for each -- slice `i`, reverses the first `seq_lengths[i]` elements along -- the dimension `seq_dim`. -- -- The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`, -- and `seq_lengths` must be a vector of length `input.dims[batch_dim]`. -- -- The output slice `i` along dimension `batch_dim` is then given by input -- slice `i`, with the first `seq_lengths[i]` slices along dimension -- `seq_dim` reversed. -- -- For example: -- -- ```prettyprint -- # Given this: -- batch_dim = 0 -- seq_dim = 1 -- input.dims = (4, 8, ...) -- seq_lengths = [7, 2, 3, 5] -- -- # then slices of input are reversed on seq_dim, but only up to seq_lengths: -- output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...] -- output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...] -- output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...] -- output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...] -- -- # while entries past seq_lens are copied through: -- output[0, 7:, :, ...] = input[0, 7:, :, ...] -- output[1, 2:, :, ...] = input[1, 2:, :, ...] -- output[2, 3:, :, ...] = input[2, 3:, :, ...] -- output[3, 5:, :, ...] = input[3, 5:, :, ...] -- ``` -- -- In contrast, if: -- -- ```prettyprint -- # Given this: -- batch_dim = 2 -- seq_dim = 0 -- input.dims = (8, ?, 4, ...) -- seq_lengths = [7, 2, 3, 5] -- -- # then slices of input are reversed on seq_dim, but only up to seq_lengths: -- output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...] -- output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...] -- output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...] -- output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...] -- -- # while entries past seq_lens are copied through: -- output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...] -- output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...] -- output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...] -- output[5:, :, 3, :, ...] = input[5:, :, 3, :, ...] -- ``` reverseSequence :: forall v'1 v'2 t tlen .
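{- Building the first example above from Haskell (a sketch: `vector` is assumed
   to come from TensorFlow.Ops, and `input` stands for a 4 x 8 x ... tensor):

     reversed = reverseSequence 1 input (vector [7, 2, 3, 5 :: Data.Int.Int64])

   seq_dim = 1 is the positional argument; batch_dim keeps its default of 0 and
   can be overridden via reverseSequence' with opAttr "batch_dim".
-}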
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tlen) => Data.Int.Int64 -- ^ __seq_dim__: The dimension which is partially reversed. -> Tensor v'1 t -- ^ __input__: The input to reverse. -> Tensor v'2 tlen -- ^ __seq_lengths__: 1-D with length `input.dims(batch_dim)` and -- `max(seq_lengths) <= input.dims(seq_dim)` -> Tensor Build t -- ^ __output__: The partially reversed input. It has the same shape as `input`. reverseSequence = reverseSequence' id reverseSequence' :: forall v'1 v'2 t tlen . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tlen) => OpParams -> Data.Int.Int64 -- ^ __seq_dim__: The dimension which is partially reversed. -> Tensor v'1 t -- ^ __input__: The input to reverse. -> Tensor v'2 tlen -- ^ __seq_lengths__: 1-D with length `input.dims(batch_dim)` and -- `max(seq_lengths) <= input.dims(seq_dim)` -> Tensor Build t -- ^ __output__: The partially reversed input. It has the same shape as `input`. reverseSequence' op'options seq_dim input seq_lengths | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs seq_lengths] return (opDef "ReverseSequence" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tlen" .~ tensorType (undefined :: tlen) & opAttr "seq_dim" .~ seq_dim & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "The input to reverse." type_attr: "T" } input_arg { name: "seq_lengths" description: "1-D with length `input.dims(batch_dim)` and\n`max(seq_lengths) <= input.dims(seq_dim)`" type_attr: "Tlen" } output_arg { name: "output" description: "The partially reversed input. It has the same shape as `input`." type_attr: "T" } attr { name: "seq_dim" type: "int" description: "The dimension which is partially reversed." } attr { name: "batch_dim" type: "int" default_value { i: 0 } description: "The dimension along which reversal is performed." } attr { name: "T" type: "type" } attr { name: "Tlen" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Reverses specific dimensions of a tensor. -- -- NOTE `tf.reverse` has now changed behavior in preparation for 1.0. -- `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0. -- -- Given a `tensor` and an `int32` tensor `axis` representing the set of -- dimensions of `tensor` to reverse, this operation reverses each dimension -- `i` for which there exists `j` s.t. `axis[j] == i`. -- -- `tensor` can have up to 8 dimensions. `axis` may specify zero or more -- dimensions. If an index is specified more than -- once, an `InvalidArgument` error is raised. -- -- For example: -- -- ```prettyprint -- # tensor 't' is [[[[ 0, 1, 2, 3], -- # [ 4, 5, 6, 7], -- # [ 8, 9, 10, 11]], -- # [[12, 13, 14, 15], -- # [16, 17, 18, 19], -- # [20, 21, 22, 23]]]] -- # tensor 't' shape is [1, 2, 3, 4] -- -- # 'dims' is [3] or 'dims' is -1 -- reverse(t, dims) ==> [[[[ 3, 2, 1, 0], -- [ 7, 6, 5, 4], -- [ 11, 10, 9, 8]], -- [[15, 14, 13, 12], -- [19, 18, 17, 16], -- [23, 22, 21, 20]]]] -- -- # 'dims' is '[1]' (or 'dims' is '[-3]') -- reverse(t, dims) ==> [[[[12, 13, 14, 15], -- [16, 17, 18, 19], -- [20, 21, 22, 23]], -- [[ 0, 1, 2, 3], -- [ 4, 5, 6, 7], -- [ 8, 9, 10, 11]]]] -- -- # 'dims' is '[2]' (or 'dims' is '[-2]') -- reverse(t, dims) ==> [[[[8, 9, 10, 11], -- [4, 5, 6, 7], -- [0, 1, 2, 3]], -- [[20, 21, 22, 23], -- [16, 17, 18, 19], -- [12, 13, 14, 15]]]] -- ``` reverseV2 :: forall v'1 v'2 tidx t .
(OneOf '[Data.Int.Int32, Data.Int.Int64] tidx, OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __tensor__: Up to 8-D. -> Tensor v'2 tidx -- ^ __axis__: 1-D. The indices of the dimensions to reverse. -> Tensor Build t -- ^ __output__: The same shape as `tensor`. reverseV2 = reverseV2' id reverseV2' :: forall v'1 v'2 tidx t . (OneOf '[Data.Int.Int32, Data.Int.Int64] tidx, OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __tensor__: Up to 8-D. -> Tensor v'2 tidx -- ^ __axis__: 1-D. The indices of the dimensions to reverse. -> Tensor Build t -- ^ __output__: The same shape as `tensor`. reverseV2' op'options tensor axis | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor, buildInputs axis] return (opDef "ReverseV2" & opAttr "Tidx" .~ tensorType (undefined :: tidx) & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" description: "Up to 8-D." type_attr: "T" } input_arg { name: "axis" description: "1-D. The indices of the dimensions to reverse." type_attr: "Tidx" } output_arg { name: "output" description: "The same shape as `tensor`." type_attr: "T" } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_INT32 type: DT_INT64 type: DT_BOOL type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Returns the element-wise integer closest to x. -- -- If the result is midway between two representable values, -- the even representable value is chosen. -- For example: -- -- ``` -- rint(-1.5) ==> -2.0 -- rint(0.5000001) ==> 1.0 -- rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.] -- ``` rint :: forall v'1 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ rint = rint' id rint' :: forall v'1 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ rint' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Rint" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Rounds the values of a tensor to the nearest integer, element-wise. -- -- Rounds half to even. Also known as banker's rounding. If you want to round -- according to the current system rounding mode use std::rint. round :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ round = round' id round' :: forall v'1 t .
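{- A worked example of the round-half-to-even rule, on values that sit exactly
   on the halfway points (`vector` is assumed from TensorFlow.Ops):

     round (vector [-1.5, -0.5, 0.5, 1.5, 2.5 :: Float])
     -- ==> [-2.0, -0.0, 0.0, 2.0, 2.0]

   0.5 rounds down to 0 while 1.5 and 2.5 both round to 2, because ties go to
   the even neighbor.
-}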
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ round' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Round" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Computes reciprocal of square root of x element-wise. -- -- I.e., \\(y = 1 / \sqrt{x}\\). rsqrt :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ rsqrt = rsqrt' id rsqrt' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ rsqrt' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Rsqrt" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Computes the gradient for the rsqrt of `x` wrt its input. -- -- Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy` -- is the corresponding input gradient. rsqrtGrad :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ rsqrtGrad = rsqrtGrad' id rsqrtGrad' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ rsqrtGrad' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "RsqrtGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Generate a single randomly distorted bounding box for an image. -- -- Bounding box annotations are often supplied in addition to ground-truth labels -- in image recognition or object localization tasks. A common technique for -- training such a system is to randomly distort an image while preserving -- its content, i.e. *data augmentation*. This Op outputs a randomly distorted -- localization of an object, i.e. bounding box, given an `image_size`, -- `bounding_boxes` and a series of constraints. -- -- The output of this Op is a single bounding box that may be used to crop the -- original image. The output is returned as 3 tensors: `begin`, `size` and -- `bboxes`. 
The first 2 tensors can be fed directly into `tf.slice` to crop the -- image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize -- what the bounding box looks like. -- -- Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The -- bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and -- height of the underlying image. -- -- For example, -- -- ```python -- # Generate a single distorted bounding box. -- begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( -- tf.shape(image), -- bounding_boxes=bounding_boxes) -- -- # Draw the bounding box in an image summary. -- image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), -- bbox_for_draw) -- tf.image_summary('images_with_box', image_with_box) -- -- # Employ the bounding box to distort the image. -- distorted_image = tf.slice(image, begin, size) -- ``` -- -- Note that if no bounding box information is available, setting -- `use_image_if_no_bounding_boxes = true` will assume there is a single implicit -- bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is -- false and no bounding boxes are supplied, an error is raised. sampleDistortedBoundingBox :: forall v'1 v'2 t m' . (MonadBuild m', OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word8] t) => Tensor v'1 t -- ^ __image_size__: 1-D, containing `[height, width, channels]`. -> Tensor v'2 Float -- ^ __bounding_boxes__: 3-D with shape `[batch, N, 4]` describing the N bounding boxes -- associated with the image. -> m' ((Tensor Value t, Tensor Value t, Tensor Value Float)) -- ^ (__begin__, __size__, __bboxes__) -- -- * __begin__: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to -- `tf.slice`. -- -- * __size__: 1-D, containing `[target_height, target_width, -1]`. Provide as input to -- `tf.slice`. -- -- * __bboxes__: 3-D with shape `[1, 1, 4]` containing the distorted bounding box. -- Provide as input to `tf.image.draw_bounding_boxes`. sampleDistortedBoundingBox = sampleDistortedBoundingBox' id sampleDistortedBoundingBox' :: forall v'1 v'2 t m' . (MonadBuild m', OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word8] t) => OpParams -> Tensor v'1 t -- ^ __image_size__: 1-D, containing `[height, width, channels]`. -> Tensor v'2 Float -- ^ __bounding_boxes__: 3-D with shape `[batch, N, 4]` describing the N bounding boxes -- associated with the image. -> m' ((Tensor Value t, Tensor Value t, Tensor Value Float)) -- ^ (__begin__, __size__, __bboxes__) -- -- * __begin__: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to -- `tf.slice`. -- -- * __size__: 1-D, containing `[target_height, target_width, -1]`. Provide as input to -- `tf.slice`. -- -- * __bboxes__: 3-D with shape `[1, 1, 4]` containing the distorted bounding box. -- Provide as input to `tf.image.draw_bounding_boxes`. sampleDistortedBoundingBox' op'options image_size bounding_boxes | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs image_size, buildInputs bounding_boxes] buildOp [] (opDef "SampleDistortedBoundingBox" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "image_size" description: "1-D, containing `[height, width, channels]`." type_attr: "T" } input_arg { name: "bounding_boxes" description: "3-D with shape `[batch, N, 4]` describing the N bounding boxes\nassociated with the image." 
type: DT_FLOAT } output_arg { name: "begin" description: "1-D, containing `[offset_height, offset_width, 0]`. Provide as input to\n`tf.slice`." type_attr: "T" } output_arg { name: "size" description: "1-D, containing `[target_height, target_width, -1]`. Provide as input to\n`tf.slice`." type_attr: "T" } output_arg { name: "bboxes" description: "3-D with shape `[1, 1, 4]` containing the distorted bounding box.\nProvide as input to `tf.image.draw_bounding_boxes`." type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 } } } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either `seed` or `seed2` are set to non-zero, the random number\ngenerator is seeded by the given `seed`. Otherwise, it is seeded by a random\nseed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } attr { name: "min_object_covered" type: "float" default_value { f: 0.1 } description: "The cropped area of the image must contain at least this\nfraction of any bounding box supplied. The value of this parameter should be\nnon-negative. In the case of 0, the cropped area does not need to overlap\nany of the bounding boxes supplied." } attr { name: "aspect_ratio_range" type: "list(float)" default_value { list { f: 0.75 f: 1.33 } } description: "The cropped area of the image must have an aspect ratio =\nwidth / height within this range." } attr { name: "area_range" type: "list(float)" default_value { list { f: 5.0e-2 f: 1.0 } } description: "The cropped area of the image must contain a fraction of the\nsupplied image within in this range." } attr { name: "max_attempts" type: "int" default_value { i: 100 } description: "Number of attempts at generating a cropped region of the image\nof the specified constraints. After `max_attempts` failures, return the entire\nimage." } attr { name: "use_image_if_no_bounding_boxes" type: "bool" default_value { b: false } description: "Controls behavior if no bounding boxes supplied.\nIf true, assume an implicit bounding box covering the whole input. If false,\nraise an error." } -} -- | Saves the input tensors to disk. -- -- The size of `tensor_names` must match the number of tensors in `data`. `data[i]` -- is written to `filename` with name `tensor_names[i]`. -- -- See also `SaveSlices`. save :: forall v'1 v'2 v'3 t m' . (MonadBuild m', TensorTypes t) => Tensor v'1 Data.ByteString.ByteString -- ^ __filename__: Must have a single element. The name of the file to which we write -- the tensor. -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_names__: Shape `[N]`. The names of the tensors to be saved. -> TensorList (v'3) t -- ^ __data__: `N` tensors to save. -> m' (ControlNode) save = save' id save' :: forall v'1 v'2 v'3 t m' . (MonadBuild m', TensorTypes t) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __filename__: Must have a single element. The name of the file to which we write -- the tensor. -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_names__: Shape `[N]`. The names of the tensors to be saved. -> TensorList (v'3) t -- ^ __data__: `N` tensors to save. 
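{- A round-trip sketch pairing `save` with `restore` (hypothetical names;
   `scalar` and `vector` are assumed to come from TensorFlow.Ops, and `:/` /
   `Nil` build the heterogeneous TensorList from TensorFlow.Types):

     checkpoint w b =
       save (scalar "/tmp/ckpt") (vector ["w", "b"]) (w :/ b :/ Nil)

     restoredW :: Tensor Build Float
     restoredW = restore (scalar "/tmp/ckpt") (scalar "w")
-}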
-> m' (ControlNode) save' op'options filename tensor_names data' | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs filename, buildInputs tensor_names, buildInputs data'] buildOp [] (opDef "Save" & opAttr "T" .~ fromTensorTypes (Proxy :: Proxy t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "filename" description: "Must have a single element. The name of the file to which we write\nthe tensor." type: DT_STRING } input_arg { name: "tensor_names" description: "Shape `[N]`. The names of the tensors to be saved." type: DT_STRING } input_arg { name: "data" description: "`N` tensors to save." type_list_attr: "T" } attr { name: "T" type: "list(type)" has_minimum: true minimum: 1 } -} -- | Saves input tensor slices to disk. -- -- This is like `Save` except that tensors can be listed in the saved file as being -- a slice of a larger tensor. `shapes_and_slices` specifies the shape of the -- larger tensor and the slice that this tensor covers. `shapes_and_slices` must -- have as many elements as `tensor_names`. -- -- Elements of the `shapes_and_slices` input must either be: -- -- * The empty string, in which case the corresponding tensor is -- saved normally. -- * A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the -- `dimI` are the dimensions of the larger tensor and `slice-spec` -- specifies what part is covered by the tensor to save. -- -- `slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1` -- where each `sliceI` is either: -- -- * The string `-` meaning that the slice covers all indices of this dimension -- * `start,length` where `start` and `length` are integers. In that -- case the slice covers `length` indices starting at `start`. -- -- See also `Save`. saveSlices :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', TensorTypes t) => Tensor v'1 Data.ByteString.ByteString -- ^ __filename__: Must have a single element. The name of the file to which we write the -- tensor. -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_names__: Shape `[N]`. The names of the tensors to be saved. -> Tensor v'3 Data.ByteString.ByteString -- ^ __shapes_and_slices__: Shape `[N]`. The shapes and slice specifications to use when -- saving the tensors. -> TensorList (v'4) t -- ^ __data__: `N` tensors to save. -> m' (ControlNode) saveSlices = saveSlices' id saveSlices' :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', TensorTypes t) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __filename__: Must have a single element. The name of the file to which we write the -- tensor. -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_names__: Shape `[N]`. The names of the tensors to be saved. -> Tensor v'3 Data.ByteString.ByteString -- ^ __shapes_and_slices__: Shape `[N]`. The shapes and slice specifications to use when -- saving the tensors. -> TensorList (v'4) t -- ^ __data__: `N` tensors to save. -> m' (ControlNode) saveSlices' op'options filename tensor_names shapes_and_slices data' | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs filename, buildInputs tensor_names, buildInputs shapes_and_slices, buildInputs data'] buildOp [] (opDef "SaveSlices" & opAttr "T" .~ fromTensorTypes (Proxy :: Proxy t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "filename" description: "Must have a single element. The name of the file to which we write the\ntensor." type: DT_STRING } input_arg { name: "tensor_names" description: "Shape `[N]`.
The names of the tensors to be saved." type: DT_STRING } input_arg { name: "shapes_and_slices" description: "Shape `[N]`. The shapes and slice specifications to use when\nsaving the tensors." type: DT_STRING } input_arg { name: "data" description: "`N` tensors to save." type_list_attr: "T" } attr { name: "T" type: "list(type)" has_minimum: true minimum: 1 } -} -- | Saves tensors in V2 checkpoint format. -- -- By default, saves the named tensors in full. If the caller wishes to save -- specific slices of full tensors, "shape_and_slices" should be non-empty strings -- and correspondingly well-formed. saveV2 :: forall v'1 v'2 v'3 v'4 dtypes m' . (MonadBuild m', TensorTypes dtypes) => Tensor v'1 Data.ByteString.ByteString -- ^ __prefix__: Must have a single element. The prefix of the V2 checkpoint to which we -- write the tensors. -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_names__: shape {N}. The names of the tensors to be saved. -> Tensor v'3 Data.ByteString.ByteString -- ^ __shape_and_slices__: shape {N}. The slice specs of the tensors to be saved. -- Empty strings indicate that they are non-partitioned tensors. -> TensorList (v'4) dtypes -- ^ __tensors__: `N` tensors to save. -> m' (ControlNode) saveV2 = saveV2' id saveV2' :: forall v'1 v'2 v'3 v'4 dtypes m' . (MonadBuild m', TensorTypes dtypes) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __prefix__: Must have a single element. The prefix of the V2 checkpoint to which we -- write the tensors. -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_names__: shape {N}. The names of the tensors to be saved. -> Tensor v'3 Data.ByteString.ByteString -- ^ __shape_and_slices__: shape {N}. The slice specs of the tensors to be saved. -- Empty strings indicate that they are non-partitioned tensors. -> TensorList (v'4) dtypes -- ^ __tensors__: `N` tensors to save. -> m' (ControlNode) saveV2' op'options prefix tensor_names shape_and_slices tensors | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs prefix, buildInputs tensor_names, buildInputs shape_and_slices, buildInputs tensors] buildOp [] (opDef "SaveV2" & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes) & op'options & opInputs .~ op'inputs) {- input_arg { name: "prefix" description: "Must have a single element. The prefix of the V2 checkpoint to which we\nwrite the tensors." type: DT_STRING } input_arg { name: "tensor_names" description: "shape {N}. The names of the tensors to be saved." type: DT_STRING } input_arg { name: "shape_and_slices" description: "shape {N}. The slice specs of the tensors to be saved.\nEmpty strings indicate that they are non-partitioned tensors." type: DT_STRING } input_arg { name: "tensors" description: "`N` tensors to save." type_list_attr: "dtypes" } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } -} -- | Outputs a `Summary` protocol buffer with scalar values. -- -- The input `tags` and `values` must have the same shape. The generated summary -- has a summary value for each tag-value pair in `tags` and `values`. scalarSummary :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 Data.ByteString.ByteString -- ^ __tags__: Tags for the summary. -> Tensor v'2 t -- ^ __values__: Same shape as `tags. Values for the summary. -> Tensor Build Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer. 
scalarSummary = scalarSummary' id scalarSummary' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __tags__: Tags for the summary. -> Tensor v'2 t -- ^ __values__: Same shape as `tags. Values for the summary. -> Tensor Build Data.ByteString.ByteString -- ^ __summary__: Scalar. Serialized `Summary` protocol buffer. scalarSummary' op'options tags values | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tags, buildInputs values] return (opDef "ScalarSummary" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tags" description: "Tags for the summary." type: DT_STRING } input_arg { name: "values" description: "Same shape as `tags. Values for the summary." type_attr: "T" } output_arg { name: "summary" description: "Scalar. Serialized `Summary` protocol buffer." type: DT_STRING } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } -} -- | Adds sparse updates to a variable reference. -- -- This operation computes -- -- # Scalar indices -- ref[indices, ...] += updates[...] -- -- # Vector indices (for each i) -- ref[indices[i], ...] += updates[i, ...] -- -- # High rank indices (for each i, ..., j) -- ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] -- -- This operation outputs `ref` after the update is done. -- This makes it easier to chain operations that need to use the reset value. -- -- Duplicate entries are handled correctly: if multiple `indices` reference -- the same location, their contributions add. -- -- Requires `updates.shape = indices.shape + ref.shape[1:]`. -- --
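--
-- For instance, a minimal sketch of accumulating sparse updates into a
-- variable, assuming @TF.constant@ and @TF.initializedVariable@ as provided
-- by the companion tensorflow-ops package (they are not part of this
-- generated module):
--
-- > scatterAddExample :: MonadBuild m => m (Tensor Ref Float)
-- > scatterAddExample = do
-- >   -- ref starts as [1, 2, 3, 4, 5, 6, 7, 8]
-- >   ref <- TF.initializedVariable
-- >            (TF.constant (Shape [8]) [1, 2, 3, 4, 5, 6, 7, 8 :: Float])
-- >   let indices = TF.constant (Shape [4]) [4, 3, 1, 7 :: Data.Int.Int32]
-- >       updates = TF.constant (Shape [4]) [9, 10, 11, 12 :: Float]
-- >   -- afterwards ref holds [1, 13, 3, 14, 14, 6, 7, 20]
-- >   scatterAdd ref indices updates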
scatterAdd :: forall v'2 v'3 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __ref__: Should be from a `Variable` node. -> Tensor v'2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`. -> Tensor v'3 t -- ^ __updates__: A tensor of updated values to add to `ref`. -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`. Returned as a convenience for operations that want -- to use the updated values after the update is done. scatterAdd = scatterAdd' id scatterAdd' :: forall v'2 v'3 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __ref__: Should be from a `Variable` node. -> Tensor v'2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`. -> Tensor v'3 t -- ^ __updates__: A tensor of updated values to add to `ref`. -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`. Returned as a convenience for operations that want -- to use the updated values after the update is done. scatterAdd' op'options ref indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs indices, buildInputs updates] buildOp [] (opDef "ScatterAdd" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" description: "Should be from a `Variable` node." type_attr: "T" is_ref: true } input_arg { name: "indices" description: "A tensor of indices into the first dimension of `ref`." type_attr: "Tindices" } input_arg { name: "updates" description: "A tensor of updated values to add to `ref`." type_attr: "T" } output_arg { name: "output_ref" description: "= Same as `ref`. Returned as a convenience for operations that want\nto use the updated values after the update is done." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, the addition will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention." } -} -- | Divides a variable reference by sparse updates. -- -- This operation computes -- -- # Scalar indices -- ref[indices, ...] /= updates[...] -- -- # Vector indices (for each i) -- ref[indices[i], ...] /= updates[i, ...] -- -- # High rank indices (for each i, ..., j) -- ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] -- -- This operation outputs `ref` after the update is done. -- This makes it easier to chain operations that need to use the reset value. -- -- Duplicate entries are handled correctly: if multiple `indices` reference -- the same location, their contributions divide. 
-- -- Requires `updates.shape = indices.shape + ref.shape[1:]`. scatterDiv :: forall v'2 v'3 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __ref__: Should be from a `Variable` node. -> Tensor v'2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`. -> Tensor v'3 t -- ^ __updates__: A tensor of values that `ref` is divided by. -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`. Returned as a convenience for operations that want -- to use the updated values after the update is done. scatterDiv = scatterDiv' id scatterDiv' :: forall v'2 v'3 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __ref__: Should be from a `Variable` node. -> Tensor v'2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`. -> Tensor v'3 t -- ^ __updates__: A tensor of values that `ref` is divided by. -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`. Returned as a convenience for operations that want -- to use the updated values after the update is done. scatterDiv' op'options ref indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs indices, buildInputs updates] buildOp [] (opDef "ScatterDiv" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" description: "Should be from a `Variable` node." type_attr: "T" is_ref: true } input_arg { name: "indices" description: "A tensor of indices into the first dimension of `ref`." type_attr: "Tindices" } input_arg { name: "updates" description: "A tensor of values that `ref` is divided by." type_attr: "T" } output_arg { name: "output_ref" description: "= Same as `ref`. Returned as a convenience for operations that want\nto use the updated values after the update is done." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, the operation will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention." } -} -- | Multiplies sparse updates into a variable reference. -- -- This operation computes -- -- # Scalar indices -- ref[indices, ...] *= updates[...] -- -- # Vector indices (for each i) -- ref[indices[i], ...] *= updates[i, ...] -- -- # High rank indices (for each i, ..., j) -- ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] -- -- This operation outputs `ref` after the update is done. -- This makes it easier to chain operations that need to use the reset value. 
-- -- Duplicate entries are handled correctly: if multiple `indices` reference -- the same location, their contributions multiply. -- -- Requires `updates.shape = indices.shape + ref.shape[1:]`. scatterMul :: forall v'2 v'3 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __ref__: Should be from a `Variable` node. -> Tensor v'2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`. -> Tensor v'3 t -- ^ __updates__: A tensor of updated values to multiply to `ref`. -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`. Returned as a convenience for operations that want -- to use the updated values after the update is done. scatterMul = scatterMul' id scatterMul' :: forall v'2 v'3 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __ref__: Should be from a `Variable` node. -> Tensor v'2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`. -> Tensor v'3 t -- ^ __updates__: A tensor of updated values to multiply to `ref`. -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`. Returned as a convenience for operations that want -- to use the updated values after the update is done. scatterMul' op'options ref indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs indices, buildInputs updates] buildOp [] (opDef "ScatterMul" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" description: "Should be from a `Variable` node." type_attr: "T" is_ref: true } input_arg { name: "indices" description: "A tensor of indices into the first dimension of `ref`." type_attr: "Tindices" } input_arg { name: "updates" description: "A tensor of updated values to multiply to `ref`." type_attr: "T" } output_arg { name: "output_ref" description: "= Same as `ref`. Returned as a convenience for operations that want\nto use the updated values after the update is done." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, the operation will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention." } -} -- | Creates a new tensor by applying sparse `updates` to individual -- -- values or slices within a zero tensor of the given `shape` tensor according to -- indices. This operator is the inverse of the [tf.gather_nd](#gather_nd) -- operator which extracts values or slices from a given tensor. -- -- TODO(simister): Add a link to Variable.__getitem__ documentation on slice -- syntax. 
-- -- `shape` is a `TensorShape` with rank `P` and `indices` is a `Tensor` of rank -- `Q`. -- -- `indices` must be integer tensor, containing indices into `shape`. -- It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. -- -- The innermost dimension of `indices` (with length `K`) corresponds to -- indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th -- dimension of `shape`. -- -- `updates` is Tensor of rank `Q-1+P-K` with shape: -- -- ``` -- [d_0, ..., d_{Q-2}, shape[K], ..., shape[P-1]]. -- ``` -- -- The simplest form of scatter is to insert individual elements in a tensor by -- index. For example, say we want to insert 4 scattered elements in a rank-1 -- tensor with 8 elements. -- --
-- -- In Python, this scatter operation would look like this: -- -- indices = tf.constant([[4], [3], [1], [7]]) -- updates = tf.constant([9, 10, 11, 12]) -- shape = tf.constant([8]) -- scatter = tf.scatter_nd(indices, updates, shape) -- with tf.Session() as sess: -- print sess.run(scatter) -- -- The resulting tensor would look like this: -- -- [0, 11, 0, 10, 9, 0, 0, 12] -- -- We can also, insert entire slices of a higher rank tensor all at once. For -- example, if we wanted to insert two slices in the first dimension of a -- rank-3 tensor with two matrices of new values. -- --
-- -- In Python, this scatter operation would look like this: -- -- indices = tf.constant([[0], [2]]) -- updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], -- [7, 7, 7, 7], [8, 8, 8, 8]], -- [[5, 5, 5, 5], [6, 6, 6, 6], -- [7, 7, 7, 7], [8, 8, 8, 8]]]) -- shape = tf.constant([4, 4, 4]) -- scatter = tf.scatter_nd(indices, updates, shape) -- with tf.Session() as sess: -- print sess.run(scatter) -- -- The resulting tensor would look like this: -- -- [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], -- [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], -- [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], -- [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]] scatterNd :: forall v'1 v'2 v'3 t tindices . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 tindices -- ^ __indices__: A Tensor. Must be one of the following types: int32, int64. -- A tensor of indices into ref. -> Tensor v'2 t -- ^ __updates__: A Tensor. Must have the same type as tensor. A tensor of updated values -- to store in ref. -> Tensor v'3 tindices -- ^ __shape__: A vector. The shape of the resulting tensor. -> Tensor Build t -- ^ __output__: A new tensor with the given shape and updates applied according -- to the indices. scatterNd = scatterNd' id scatterNd' :: forall v'1 v'2 v'3 t tindices . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 tindices -- ^ __indices__: A Tensor. Must be one of the following types: int32, int64. -- A tensor of indices into ref. -> Tensor v'2 t -- ^ __updates__: A Tensor. Must have the same type as tensor. A tensor of updated values -- to store in ref. -> Tensor v'3 tindices -- ^ __shape__: A vector. The shape of the resulting tensor. -> Tensor Build t -- ^ __output__: A new tensor with the given shape and updates applied according -- to the indices. scatterNd' op'options indices updates shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices, buildInputs updates, buildInputs shape] return (opDef "ScatterNd" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "indices" description: "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref." type_attr: "Tindices" } input_arg { name: "updates" description: "A Tensor. Must have the same type as tensor. A tensor of updated values\nto store in ref." type_attr: "T" } input_arg { name: "shape" description: "A vector. The shape of the resulting tensor." type_attr: "Tindices" } output_arg { name: "output" description: "A new tensor with the given shape and updates applied according\nto the indices." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Applies sparse addition between `updates` and individual values or slices -- -- within a given variable according to `indices`. -- -- `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. -- -- `indices` must be integer tensor, containing indices into `ref`. -- It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. -- -- The innermost dimension of `indices` (with length `K`) corresponds to -- indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th -- dimension of `ref`. 
-- -- `updates` is `Tensor` of rank `Q-1+P-K` with shape: -- -- ``` -- [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. -- ``` -- -- For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 -- elements. In Python, that addition would look like this: -- -- ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) -- indices = tf.constant([[4], [3], [1], [7]]) -- updates = tf.constant([9, 10, 11, 12]) -- add = tf.scatter_nd_add(ref, indices, updates) -- with tf.Session() as sess: -- print sess.run(add) -- -- The resulting update to ref would look like this: -- -- [1, 13, 3, 14, 14, 6, 7, 20] -- -- See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to -- slices. scatterNdAdd :: forall v'2 v'3 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __ref__: A mutable Tensor. Should be from a Variable node. -> Tensor v'2 tindices -- ^ __indices__: A Tensor. Must be one of the following types: int32, int64. -- A tensor of indices into ref. -> Tensor v'3 t -- ^ __updates__: A Tensor. Must have the same type as ref. A tensor of updated values -- to add to ref. -> m' (Tensor Ref t) -- ^ __output_ref__: Same as ref. Returned as a convenience for operations that want -- to use the updated values after the update is done. scatterNdAdd = scatterNdAdd' id scatterNdAdd' :: forall v'2 v'3 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __ref__: A mutable Tensor. Should be from a Variable node. -> Tensor v'2 tindices -- ^ __indices__: A Tensor. Must be one of the following types: int32, int64. -- A tensor of indices into ref. -> Tensor v'3 t -- ^ __updates__: A Tensor. Must have the same type as ref. A tensor of updated values -- to add to ref. -> m' (Tensor Ref t) -- ^ __output_ref__: Same as ref. Returned as a convenience for operations that want -- to use the updated values after the update is done. scatterNdAdd' op'options ref indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs indices, buildInputs updates] buildOp [] (opDef "ScatterNdAdd" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" description: "A mutable Tensor. Should be from a Variable node." type_attr: "T" is_ref: true } input_arg { name: "indices" description: "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref." type_attr: "Tindices" } input_arg { name: "updates" description: "A Tensor. Must have the same type as ref. A tensor of updated values\nto add to ref." type_attr: "T" } output_arg { name: "output_ref" description: "Same as ref. Returned as a convenience for operations that want\nto use the updated values after the update is done." 
type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention." } -} -- | Applies sparse subtraction between `updates` and individual values or slices -- -- within a given variable according to `indices`. -- -- `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. -- -- `indices` must be integer tensor, containing indices into `ref`. -- It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. -- -- The innermost dimension of `indices` (with length `K`) corresponds to -- indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th -- dimension of `ref`. -- -- `updates` is `Tensor` of rank `Q-1+P-K` with shape: -- -- ``` -- [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. -- ``` -- -- For example, say we want to subtract 4 scattered elements from a rank-1 tensor -- with 8 elements. In Python, that subtraction would look like this: -- -- ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) -- indices = tf.constant([[4], [3], [1], [7]]) -- updates = tf.constant([9, 10, 11, 12]) -- sub = tf.scatter_nd_sub(ref, indices, updates) -- with tf.Session() as sess: -- print sess.run(sub) -- -- The resulting update to ref would look like this: -- -- [1, -9, 3, -6, -4, 6, 7, -4] -- -- See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to -- slices. scatterNdSub :: forall v'2 v'3 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __ref__: A mutable Tensor. Should be from a Variable node. -> Tensor v'2 tindices -- ^ __indices__: A Tensor. Must be one of the following types: int32, int64. -- A tensor of indices into ref. -> Tensor v'3 t -- ^ __updates__: A Tensor. Must have the same type as ref. A tensor of updated values -- to subtract from ref. -> m' (Tensor Ref t) -- ^ __output_ref__: Same as ref. Returned as a convenience for operations that want -- to use the updated values after the update is done. scatterNdSub = scatterNdSub' id scatterNdSub' :: forall v'2 v'3 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __ref__: A mutable Tensor. Should be from a Variable node. -> Tensor v'2 tindices -- ^ __indices__: A Tensor. Must be one of the following types: int32, int64. -- A tensor of indices into ref. -> Tensor v'3 t -- ^ __updates__: A Tensor. Must have the same type as ref. A tensor of updated values -- to subtract from ref. -> m' (Tensor Ref t) -- ^ __output_ref__: Same as ref. Returned as a convenience for operations that want -- to use the updated values after the update is done. 
scatterNdSub' op'options ref indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs indices, buildInputs updates] buildOp [] (opDef "ScatterNdSub" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" description: "A mutable Tensor. Should be from a Variable node." type_attr: "T" is_ref: true } input_arg { name: "indices" description: "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref." type_attr: "Tindices" } input_arg { name: "updates" description: "A Tensor. Must have the same type as ref. A tensor of updated values\nto subtract from ref." type_attr: "T" } output_arg { name: "output_ref" description: "Same as ref. Returned as a convenience for operations that want\nto use the updated values after the update is done." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention." } -} -- | Applies sparse `updates` to individual values or slices within a given -- -- variable according to `indices`. -- -- `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. -- -- `indices` must be integer tensor, containing indices into `ref`. -- It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. -- -- The innermost dimension of `indices` (with length `K`) corresponds to -- indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th -- dimension of `ref`. -- -- `updates` is `Tensor` of rank `Q-1+P-K` with shape: -- -- ``` -- [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. -- ``` -- -- For example, say we want to update 4 scattered elements to a rank-1 tensor to -- 8 elements. In Python, that update would look like this: -- -- ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) -- indices = tf.constant([[4], [3], [1] ,[7]]) -- updates = tf.constant([9, 10, 11, 12]) -- update = tf.scatter_nd_update(ref, indices, updates) -- with tf.Session() as sess: -- print sess.run(update) -- -- The resulting update to ref would look like this: -- -- [1, 11, 3, 10, 9, 6, 7, 12] -- -- See [tf.scatter_nd](#scatter_nd) for more details about how to make updates to -- slices. scatterNdUpdate :: forall v'2 v'3 t tindices m' . (MonadBuild m', TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __ref__: A mutable Tensor. Should be from a Variable node. -> Tensor v'2 tindices -- ^ __indices__: A Tensor. Must be one of the following types: int32, int64. -- A tensor of indices into ref. -> Tensor v'3 t -- ^ __updates__: A Tensor. Must have the same type as ref. A tensor of updated -- values to add to ref. -> m' (Tensor Ref t) -- ^ __output_ref__: Same as ref. Returned as a convenience for operations that want to -- use the updated values after the update is done. scatterNdUpdate = scatterNdUpdate' id scatterNdUpdate' :: forall v'2 v'3 t tindices m' . 
(MonadBuild m', TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __ref__: A mutable Tensor. Should be from a Variable node. -> Tensor v'2 tindices -- ^ __indices__: A Tensor. Must be one of the following types: int32, int64. -- A tensor of indices into ref. -> Tensor v'3 t -- ^ __updates__: A Tensor. Must have the same type as ref. A tensor of updated -- values to add to ref. -> m' (Tensor Ref t) -- ^ __output_ref__: Same as ref. Returned as a convenience for operations that want to -- use the updated values after the update is done. scatterNdUpdate' op'options ref indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs indices, buildInputs updates] buildOp [] (opDef "ScatterNdUpdate" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" description: "A mutable Tensor. Should be from a Variable node." type_attr: "T" is_ref: true } input_arg { name: "indices" description: "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref." type_attr: "Tindices" } input_arg { name: "updates" description: "A Tensor. Must have the same type as ref. A tensor of updated\nvalues to add to ref." type_attr: "T" } output_arg { name: "output_ref" description: "Same as ref. Returned as a convenience for operations that want to\nuse the updated values after the update is done." type_attr: "T" is_ref: true } attr { name: "T" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: true } description: "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention." } -} -- | Subtracts sparse updates to a variable reference. -- -- # Scalar indices -- ref[indices, ...] -= updates[...] -- -- # Vector indices (for each i) -- ref[indices[i], ...] -= updates[i, ...] -- -- # High rank indices (for each i, ..., j) -- ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] -- -- This operation outputs `ref` after the update is done. -- This makes it easier to chain operations that need to use the reset value. -- -- Duplicate entries are handled correctly: if multiple `indices` reference -- the same location, their (negated) contributions add. -- -- Requires `updates.shape = indices.shape + ref.shape[1:]`. -- --
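--
-- A sketch in the same style as the 'scatterAdd' example (again assuming
-- @TF.constant@ and @TF.initializedVariable@ from tensorflow-ops),
-- illustrating that duplicate indices accumulate:
--
-- > scatterSubExample :: MonadBuild m => m (Tensor Ref Float)
-- > scatterSubExample = do
-- >   ref <- TF.initializedVariable
-- >            (TF.constant (Shape [4]) [10, 10, 10, 10 :: Float])
-- >   let indices = TF.constant (Shape [2]) [0, 0 :: Data.Int.Int32]
-- >       updates = TF.constant (Shape [2]) [1, 2 :: Float]
-- >   -- both updates hit index 0, so ref becomes [7, 10, 10, 10]
-- >   scatterSub ref indices updates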
scatterSub :: forall v'2 v'3 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __ref__: Should be from a `Variable` node. -> Tensor v'2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`. -> Tensor v'3 t -- ^ __updates__: A tensor of updated values to subtract from `ref`. -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`. Returned as a convenience for operations that want -- to use the updated values after the update is done. scatterSub = scatterSub' id scatterSub' :: forall v'2 v'3 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __ref__: Should be from a `Variable` node. -> Tensor v'2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`. -> Tensor v'3 t -- ^ __updates__: A tensor of updated values to subtract from `ref`. -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`. Returned as a convenience for operations that want -- to use the updated values after the update is done. scatterSub' op'options ref indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs indices, buildInputs updates] buildOp [] (opDef "ScatterSub" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" description: "Should be from a `Variable` node." type_attr: "T" is_ref: true } input_arg { name: "indices" description: "A tensor of indices into the first dimension of `ref`." type_attr: "Tindices" } input_arg { name: "updates" description: "A tensor of updated values to subtract from `ref`." type_attr: "T" } output_arg { name: "output_ref" description: "= Same as `ref`. Returned as a convenience for operations that want\nto use the updated values after the update is done." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention." } -} -- | Applies sparse updates to a variable reference. -- -- This operation computes -- -- # Scalar indices -- ref[indices, ...] = updates[...] -- -- # Vector indices (for each i) -- ref[indices[i], ...] = updates[i, ...] -- -- # High rank indices (for each i, ..., j) -- ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] -- -- This operation outputs `ref` after the update is done. -- This makes it easier to chain operations that need to use the reset value. 
-- -- If values in `ref` is to be updated more than once, because there are -- duplicate entries in `indices`, the order at which the updates happen -- for each value is undefined. -- -- Requires `updates.shape = indices.shape + ref.shape[1:]`. -- --
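--
-- A sketch with distinct indices (safe, since the order of duplicate
-- updates is undefined), assuming @TF.constant@ and
-- @TF.initializedVariable@ from tensorflow-ops:
--
-- > scatterUpdateExample :: MonadBuild m => m (Tensor Ref Float)
-- > scatterUpdateExample = do
-- >   ref <- TF.initializedVariable (TF.constant (Shape [3]) [0, 0, 0 :: Float])
-- >   let indices = TF.constant (Shape [2]) [0, 2 :: Data.Int.Int32]
-- >       updates = TF.constant (Shape [2]) [5, 7 :: Float]
-- >   -- ref becomes [5, 0, 7]
-- >   scatterUpdate ref indices updates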
scatterUpdate :: forall v'2 v'3 t tindices m' . (MonadBuild m', TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __ref__: Should be from a `Variable` node. -> Tensor v'2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`. -> Tensor v'3 t -- ^ __updates__: A tensor of updated values to store in `ref`. -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`. Returned as a convenience for operations that want -- to use the updated values after the update is done. scatterUpdate = scatterUpdate' id scatterUpdate' :: forall v'2 v'3 t tindices m' . (MonadBuild m', TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __ref__: Should be from a `Variable` node. -> Tensor v'2 tindices -- ^ __indices__: A tensor of indices into the first dimension of `ref`. -> Tensor v'3 t -- ^ __updates__: A tensor of updated values to store in `ref`. -> m' (Tensor Ref t) -- ^ __output_ref__: = Same as `ref`. Returned as a convenience for operations that want -- to use the updated values after the update is done. scatterUpdate' op'options ref indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs indices, buildInputs updates] buildOp [] (opDef "ScatterUpdate" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" description: "Should be from a `Variable` node." type_attr: "T" is_ref: true } input_arg { name: "indices" description: "A tensor of indices into the first dimension of `ref`." type_attr: "Tindices" } input_arg { name: "updates" description: "A tensor of updated values to store in `ref`." type_attr: "T" } output_arg { name: "output_ref" description: "= Same as `ref`. Returned as a convenience for operations that want\nto use the updated values after the update is done." type_attr: "T" is_ref: true } attr { name: "T" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: true } description: "If True, the assignment will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention." } -} -- | Computes fingerprints of the input strings. sdcaFprint :: Tensor v'1 Data.ByteString.ByteString -- ^ __input__: vector of strings to compute fingerprints on. -> Tensor Build Data.Int.Int64 -- ^ __output__: a (N,2) shaped matrix where N is the number of elements in the input -- vector. Each row contains the low and high parts of the fingerprint. sdcaFprint = sdcaFprint' id sdcaFprint' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__: vector of strings to compute fingerprints on. -> Tensor Build Data.Int.Int64 -- ^ __output__: a (N,2) shaped matrix where N is the number of elements in the input -- vector. Each row contains the low and high parts of the fingerprint. sdcaFprint' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "SdcaFprint" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "vector of strings to compute fingerprints on." type: DT_STRING } output_arg { name: "output" description: "a (N,2) shaped matrix where N is the number of elements in the input\nvector. Each row contains the low and high parts of the fingerprint." 
type: DT_INT64 } -} -- | Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for -- -- linear models with L1 + L2 regularization. As global optimization objective is -- strongly-convex, the optimizer optimizes the dual objective at each step. The -- optimizer applies each update one example at a time. Examples are sampled -- uniformly, and the optimizer is learning rate free and enjoys linear convergence -- rate. -- -- Proximal Stochastic Dual Coordinate Ascent, Shalev-Shwartz, Shai; Zhang, Tong. -- 2012 arXiv1211.2717S: http://arxiv.org/pdf/1211.2717v1.pdf -- -- Loss objective = \sum f_{i}(wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w| -- -- Adding vs. Averaging in Distributed Primal-Dual Optimization. -- Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan, Peter Richtarik, -- Martin Takac http://arxiv.org/abs/1502.03508 -- -- Stochastic Dual Coordinate Ascent with Adaptive Probabilities -- Dominik Csiba, Zheng Qu, Peter Richtarik https://arxiv.org/abs/1502.08053 sdcaOptimizer :: Float -- ^ __l1__: Symmetric l1 regularization strength. -> Float -- ^ __l2__: Symmetric l2 regularization strength. -> Data.Int.Int64 -- ^ __num_inner_iterations__: Number of iterations per mini-batch. -> Data.Int.Int64 -- ^ __num_loss_partitions__: Number of partitions of the global loss function. -> [Tensor v'1 Data.Int.Int64] -- ^ __sparse_example_indices__: a list of vectors which contain example indices. -> [Tensor v'2 Data.Int.Int64] -- ^ __sparse_feature_indices__: a list of vectors which contain feature indices. -> [Tensor v'3 Float] -- ^ __sparse_feature_values__: a list of vectors which contains feature value -- associated with each feature group. -> [Tensor v'4 Float] -- ^ __dense_features__: a list of matrices which contains the dense feature values. -> Tensor v'5 Float -- ^ __example_weights__: a vector which contains the weight associated with each -- example. -> Tensor v'6 Float -- ^ __example_labels__: a vector which contains the label/target associated with each -- example. -> [Tensor v'7 Data.Int.Int64] -- ^ __sparse_indices__: a list of vectors where each value is the indices which has -- corresponding weights in sparse_weights. This field maybe ommitted for the -- dense approach. -> [Tensor v'8 Float] -- ^ __sparse_weights__: a list of vectors where each value is the weight associated with -- a sparse feature group. -> [Tensor v'9 Float] -- ^ __dense_weights__: a list of vectors where the values are the weights associated -- with a dense feature group. -> Tensor v'10 Float -- ^ __example_state_data__: a list of vectors containing the example state data. -> (Tensor Build Float, [Tensor Build Float], [Tensor Build Float]) -- ^ (__out_example_state_data__, __out_delta_sparse_weights__, __out_delta_dense_weights__) -- -- * __out_example_state_data__: a list of vectors containing the updated example state -- data. -- -- * __out_delta_sparse_weights__: a list of vectors where each value is the delta -- weights associated with a sparse feature group. -- -- * __out_delta_dense_weights__: a list of vectors where the values are the delta -- weights associated with a dense feature group. sdcaOptimizer = sdcaOptimizer' id sdcaOptimizer' :: OpParams -> Float -- ^ __l1__: Symmetric l1 regularization strength. -> Float -- ^ __l2__: Symmetric l2 regularization strength. -> Data.Int.Int64 -- ^ __num_inner_iterations__: Number of iterations per mini-batch. -> Data.Int.Int64 -- ^ __num_loss_partitions__: Number of partitions of the global loss function. 
-> [Tensor v'1 Data.Int.Int64] -- ^ __sparse_example_indices__: a list of vectors which contain example indices. -> [Tensor v'2 Data.Int.Int64] -- ^ __sparse_feature_indices__: a list of vectors which contain feature indices. -> [Tensor v'3 Float] -- ^ __sparse_feature_values__: a list of vectors which contains feature value -- associated with each feature group. -> [Tensor v'4 Float] -- ^ __dense_features__: a list of matrices which contains the dense feature values. -> Tensor v'5 Float -- ^ __example_weights__: a vector which contains the weight associated with each -- example. -> Tensor v'6 Float -- ^ __example_labels__: a vector which contains the label/target associated with each -- example. -> [Tensor v'7 Data.Int.Int64] -- ^ __sparse_indices__: a list of vectors where each value is the indices which has -- corresponding weights in sparse_weights. This field maybe ommitted for the -- dense approach. -> [Tensor v'8 Float] -- ^ __sparse_weights__: a list of vectors where each value is the weight associated with -- a sparse feature group. -> [Tensor v'9 Float] -- ^ __dense_weights__: a list of vectors where the values are the weights associated -- with a dense feature group. -> Tensor v'10 Float -- ^ __example_state_data__: a list of vectors containing the example state data. -> (Tensor Build Float, [Tensor Build Float], [Tensor Build Float]) -- ^ (__out_example_state_data__, __out_delta_sparse_weights__, __out_delta_dense_weights__) -- -- * __out_example_state_data__: a list of vectors containing the updated example state -- data. -- -- * __out_delta_sparse_weights__: a list of vectors where each value is the delta -- weights associated with a sparse feature group. -- -- * __out_delta_dense_weights__: a list of vectors where the values are the delta -- weights associated with a dense feature group. 
sdcaOptimizer' op'options l1 l2 num_inner_iterations num_loss_partitions sparse_example_indices sparse_feature_indices sparse_feature_values dense_features example_weights example_labels sparse_indices sparse_weights dense_weights example_state_data | eqLengthGuard [("num_sparse_features", [("sparse_example_indices", length sparse_example_indices), ("sparse_feature_indices", length sparse_feature_indices), ("sparse_indices", length sparse_indices), ("sparse_weights", length sparse_weights)]), ("num_sparse_features_with_values", [("sparse_feature_values", length sparse_feature_values)]), ("num_dense_features", [("dense_features", length dense_features), ("dense_weights", length dense_weights)])] = pureOp [num_sparse_features, num_dense_features] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sparse_example_indices, buildInputs sparse_feature_indices, buildInputs sparse_feature_values, buildInputs dense_features, buildInputs example_weights, buildInputs example_labels, buildInputs sparse_indices, buildInputs sparse_weights, buildInputs dense_weights, buildInputs example_state_data] return (opDef "SdcaOptimizer" & opAttr "l1" .~ l1 & opAttr "l2" .~ l2 & opAttr "num_inner_iterations" .~ num_inner_iterations & opAttr "num_loss_partitions" .~ num_loss_partitions & opAttr "num_sparse_features" .~ num_sparse_features & opAttr "num_sparse_features_with_values" .~ num_sparse_features_with_values & opAttr "num_dense_features" .~ num_dense_features & op'options & opInputs .~ op'inputs) where num_sparse_features = fromIntegral (length sparse_example_indices) :: Int64 num_sparse_features_with_values = fromIntegral (length sparse_feature_values) :: Int64 num_dense_features = fromIntegral (length dense_features) :: Int64 {- input_arg { name: "sparse_example_indices" description: "a list of vectors which contain example indices." type: DT_INT64 number_attr: "num_sparse_features" } input_arg { name: "sparse_feature_indices" description: "a list of vectors which contain feature indices." type: DT_INT64 number_attr: "num_sparse_features" } input_arg { name: "sparse_feature_values" description: "a list of vectors which contains feature value\nassociated with each feature group." type: DT_FLOAT number_attr: "num_sparse_features_with_values" } input_arg { name: "dense_features" description: "a list of matrices which contains the dense feature values." type: DT_FLOAT number_attr: "num_dense_features" } input_arg { name: "example_weights" description: "a vector which contains the weight associated with each\nexample." type: DT_FLOAT } input_arg { name: "example_labels" description: "a vector which contains the label/target associated with each\nexample." type: DT_FLOAT } input_arg { name: "sparse_indices" description: "a list of vectors where each value is the indices which has\ncorresponding weights in sparse_weights. This field maybe ommitted for the\ndense approach." type: DT_INT64 number_attr: "num_sparse_features" } input_arg { name: "sparse_weights" description: "a list of vectors where each value is the weight associated with\na sparse feature group." type: DT_FLOAT number_attr: "num_sparse_features" } input_arg { name: "dense_weights" description: "a list of vectors where the values are the weights associated\nwith a dense feature group." type: DT_FLOAT number_attr: "num_dense_features" } input_arg { name: "example_state_data" description: "a list of vectors containing the example state data." 
type: DT_FLOAT } output_arg { name: "out_example_state_data" description: "a list of vectors containing the updated example state\ndata." type: DT_FLOAT } output_arg { name: "out_delta_sparse_weights" description: "a list of vectors where each value is the delta\nweights associated with a sparse feature group." type: DT_FLOAT number_attr: "num_sparse_features" } output_arg { name: "out_delta_dense_weights" description: "a list of vectors where the values are the delta\nweights associated with a dense feature group." type: DT_FLOAT number_attr: "num_dense_features" } attr { name: "loss_type" type: "string" description: "Type of the primal loss. Currently SdcaSolver supports logistic,\nsquared and hinge losses." allowed_values { list { s: "logistic_loss" s: "squared_loss" s: "hinge_loss" s: "smooth_hinge_loss" } } } attr { name: "adaptative" type: "bool" default_value { b: false } description: "Whether to use Adapative SDCA for the inner loop." } attr { name: "num_sparse_features" type: "int" description: "Number of sparse feature groups to train on." has_minimum: true } attr { name: "num_sparse_features_with_values" type: "int" description: "Number of sparse feature groups with values\nassociated with it, otherwise implicitly treats values as 1.0." has_minimum: true } attr { name: "num_dense_features" type: "int" description: "Number of dense feature groups to train on." has_minimum: true } attr { name: "l1" type: "float" description: "Symmetric l1 regularization strength." } attr { name: "l2" type: "float" description: "Symmetric l2 regularization strength." } attr { name: "num_loss_partitions" type: "int" description: "Number of partitions of the global loss function." has_minimum: true minimum: 1 } attr { name: "num_inner_iterations" type: "int" description: "Number of iterations per mini-batch." has_minimum: true minimum: 1 } -} -- | Applies L1 regularization shrink step on the parameters. sdcaShrinkL1 :: forall m' . (MonadBuild m') => Float -- ^ __l1__: Symmetric l1 regularization strength. -> Float -- ^ __l2__: Symmetric l2 regularization strength. Should be a positive float. -> [Tensor Ref Float] -- ^ __weights__: a list of vectors where each value is the weight associated with a -- feature group. -> m' (ControlNode) sdcaShrinkL1 = sdcaShrinkL1' id sdcaShrinkL1' :: forall m' . (MonadBuild m') => OpParams -> Float -- ^ __l1__: Symmetric l1 regularization strength. -> Float -- ^ __l2__: Symmetric l2 regularization strength. Should be a positive float. -> [Tensor Ref Float] -- ^ __weights__: a list of vectors where each value is the weight associated with a -- feature group. -> m' (ControlNode) sdcaShrinkL1' op'options l1 l2 weights | eqLengthGuard [("num_features", [("weights", length weights)])] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs weights] buildOp [] (opDef "SdcaShrinkL1" & opAttr "l1" .~ l1 & opAttr "l2" .~ l2 & opAttr "num_features" .~ num_features & op'options & opInputs .~ op'inputs) where num_features = fromIntegral (length weights) :: Int64 {- input_arg { name: "weights" description: "a list of vectors where each value is the weight associated with a\nfeature group." type: DT_FLOAT number_attr: "num_features" is_ref: true } attr { name: "num_features" type: "int" description: "Number of feature groups to apply shrinking step." has_minimum: true } attr { name: "l1" type: "float" description: "Symmetric l1 regularization strength." } attr { name: "l2" type: "float" description: "Symmetric l2 regularization strength. 
Should be a positive float." } -} -- | Computes the maximum along segments of a tensor. -- -- Read @{$math_ops#segmentation$the section on segmentation} for an explanation of -- segments. -- -- Computes a tensor such that -- \\(output_i = \max_j(data_j)\\) where `max` is over `j` such -- that `segment_ids[j] == i`. -- -- If the max is empty for a given segment ID `i`, `output[i] = 0`. -- --
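--
-- For example, with two segments (assuming @TF.constant@ from the
-- companion tensorflow-ops package):
--
-- > segmentMaxExample :: Tensor Build Float
-- > segmentMaxExample =
-- >   segmentMax (TF.constant (Shape [4]) [1, 3, 2, 5 :: Float])
-- >              (TF.constant (Shape [4]) [0, 0, 1, 1 :: Data.Int.Int32])
-- >   -- evaluates to [3, 5]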
segmentMax :: forall v'1 v'2 t tindices . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s -- first dimension. Values should be sorted and can be repeated. -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which -- has size `k`, the number of segments. segmentMax = segmentMax' id segmentMax' :: forall v'1 v'2 t tindices . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s -- first dimension. Values should be sorted and can be repeated. -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which -- has size `k`, the number of segments. segmentMax' op'options data' segment_ids | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs segment_ids] return (opDef "SegmentMax" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "segment_ids" description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension. Values should be sorted and can be repeated." type_attr: "Tindices" } output_arg { name: "output" description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Computes the mean along segments of a tensor. -- -- Read @{$math_ops#segmentation$the section on segmentation} for an explanation of -- segments. -- -- Computes a tensor such that -- \\(output_i = \frac{\sum_j data_j}{N}\\) where `mean` is -- over `j` such that `segment_ids[j] == i` and `N` is the total number of -- values summed. -- -- If the mean is empty for a given segment ID `i`, `output[i] = 0`. -- --
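--
-- An analogous sketch (same @TF.constant@ assumption as the 'segmentMax'
-- example):
--
-- > segmentMeanExample :: Tensor Build Float
-- > segmentMeanExample =
-- >   segmentMean (TF.constant (Shape [4]) [1, 3, 2, 6 :: Float])
-- >               (TF.constant (Shape [4]) [0, 0, 1, 1 :: Data.Int.Int32])
-- >   -- evaluates to [2, 4]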
segmentMean :: forall v'1 v'2 t tindices . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s -- first dimension. Values should be sorted and can be repeated. -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which -- has size `k`, the number of segments. segmentMean = segmentMean' id segmentMean' :: forall v'1 v'2 t tindices . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s -- first dimension. Values should be sorted and can be repeated. -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which -- has size `k`, the number of segments. segmentMean' op'options data' segment_ids | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs segment_ids] return (opDef "SegmentMean" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "segment_ids" description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension. Values should be sorted and can be repeated." type_attr: "Tindices" } output_arg { name: "output" description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Computes the minimum along segments of a tensor. -- -- Read @{$math_ops#segmentation$the section on segmentation} for an explanation of -- segments. -- -- Computes a tensor such that -- \\(output_i = \min_j(data_j)\\) where `min` is over `j` such -- that `segment_ids[j] == i`. -- -- If the min is empty for a given segment ID `i`, `output[i] = 0`. -- --
-- --
segmentMin :: forall v'1 v'2 t tindices . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s -- first dimension. Values should be sorted and can be repeated. -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which -- has size `k`, the number of segments. segmentMin = segmentMin' id segmentMin' :: forall v'1 v'2 t tindices . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s -- first dimension. Values should be sorted and can be repeated. -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which -- has size `k`, the number of segments. segmentMin' op'options data' segment_ids | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs segment_ids] return (opDef "SegmentMin" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "segment_ids" description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension. Values should be sorted and can be repeated." type_attr: "Tindices" } output_arg { name: "output" description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Computes the product along segments of a tensor. -- -- Read @{$math_ops#segmentation$the section on segmentation} for an explanation of -- segments. -- -- Computes a tensor such that -- \\(output_i = \prod_j data_j\\) where the product is over `j` such -- that `segment_ids[j] == i`. -- -- If the product is empty for a given segment ID `i`, `output[i] = 1`. -- --
-- --
segmentProd :: forall v'1 v'2 t tindices . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s -- first dimension. Values should be sorted and can be repeated. -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which -- has size `k`, the number of segments. segmentProd = segmentProd' id segmentProd' :: forall v'1 v'2 t tindices . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s -- first dimension. Values should be sorted and can be repeated. -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which -- has size `k`, the number of segments. segmentProd' op'options data' segment_ids | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs segment_ids] return (opDef "SegmentProd" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "segment_ids" description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension. Values should be sorted and can be repeated." type_attr: "Tindices" } output_arg { name: "output" description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Computes the sum along segments of a tensor. -- -- Read @{$math_ops#segmentation$the section on segmentation} for an explanation of -- segments. -- -- Computes a tensor such that -- \\(output_i = \sum_j data_j\\) where sum is over `j` such -- that `segment_ids[j] == i`. -- -- If the sum is empty for a given segment ID `i`, `output[i] = 0`. -- --
-- --
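-- A usage sketch (an editor's addition, not generated from the op registry):
-- assuming `constant` from TensorFlow.Ops, `runSession` and `run` from
-- TensorFlow.Session, and Data.Vector imported qualified as V, summing a
-- four-element vector into two segments could look like:
--
-- > segmentSumExample :: IO (V.Vector Float)
-- > segmentSumExample = runSession $ run $ segmentSum values ids
-- >   where
-- >     -- Four values; sorted segment ids assign them to segments 0 and 1.
-- >     values = constant (Shape [4]) [10, 20, 30, 40 :: Float]
-- >     ids    = constant (Shape [4]) [0, 0, 1, 1 :: Int32]
--
-- The result should be @[30, 70]@: one sum per segment. The same pattern
-- applies to `segmentMax`, `segmentMean`, `segmentMin`, and `segmentProd`.
--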
segmentSum :: forall v'1 v'2 t tindices . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s -- first dimension. Values should be sorted and can be repeated. -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which -- has size `k`, the number of segments. segmentSum = segmentSum' id segmentSum' :: forall v'1 v'2 t tindices . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s -- first dimension. Values should be sorted and can be repeated. -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which -- has size `k`, the number of segments. segmentSum' op'options data' segment_ids | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs segment_ids] return (opDef "SegmentSum" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "segment_ids" description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension. Values should be sorted and can be repeated." type_attr: "Tindices" } output_arg { name: "output" description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Selects elements from `t` or `e`, depending on `condition`. -- -- The `t` and `e` tensors must have the same shape, and the -- output will also have that shape. -- -- The `condition` tensor must be a scalar if `t` and `e` are scalars. -- If `t` and `e` are vectors or higher rank, then `condition` must be either a -- scalar, a vector with size matching the first dimension of `t`, or must have -- the same shape as `t`. -- -- The `condition` tensor acts as a mask that chooses, based on the value at each -- element, whether the corresponding element / row in the output should be -- taken from `t` (if true) or `e` (if false). -- -- If `condition` is a vector and `t` and `e` are higher rank matrices, then -- it chooses which row (outer dimension) to copy from `t` and `e`. -- If `condition` has the same shape as `t` and `e`, then it chooses which -- element to copy from `t` and `e`. 
-- -- For example: -- -- ```prettyprint -- # 'condition' tensor is [[True, False] -- # [False, True]] -- # 't' is [[1, 2], -- # [3, 4]] -- # 'e' is [[5, 6], -- # [7, 8]] -- select(condition, t, e) ==> [[1, 6], -- [7, 4]] -- -- -- # 'condition' tensor is [True, False] -- # 't' is [[1, 2], -- # [3, 4]] -- # 'e' is [[5, 6], -- # [7, 8]] -- select(condition, t, e) ==> [[1, 2], -- [7, 8]] -- -- ``` select :: forall v'1 v'2 v'3 t . (TensorType t) => Tensor v'1 Bool -- ^ __condition__ -> Tensor v'2 t -- ^ __t__: = A `Tensor` which may have the same shape as `condition`. -- If `condition` is rank 1, `t` may have higher rank, -- but its first dimension must match the size of `condition`. -> Tensor v'3 t -- ^ __e__: = A `Tensor` with the same type and shape as `t`. -> Tensor Build t -- ^ __output__: = A `Tensor` with the same type and shape as `t` and `e`. select = select' id select' :: forall v'1 v'2 v'3 t . (TensorType t) => OpParams -> Tensor v'1 Bool -- ^ __condition__ -> Tensor v'2 t -- ^ __t__: = A `Tensor` which may have the same shape as `condition`. -- If `condition` is rank 1, `t` may have higher rank, -- but its first dimension must match the size of `condition`. -> Tensor v'3 t -- ^ __e__: = A `Tensor` with the same type and shape as `t`. -> Tensor Build t -- ^ __output__: = A `Tensor` with the same type and shape as `t` and `e`. select' op'options condition t e | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs condition, buildInputs t, buildInputs e] return (opDef "Select" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "condition" type: DT_BOOL } input_arg { name: "t" description: "= A `Tensor` which may have the same shape as `condition`.\nIf `condition` is rank 1, `t` may have higher rank,\nbut its first dimension must match the size of `condition`." type_attr: "T" } input_arg { name: "e" description: "= A `Tensor` with the same type and shape as `t`." type_attr: "T" } output_arg { name: "output" description: "= A `Tensor` with the same type and shape as `t` and `e`." type_attr: "T" } attr { name: "T" type: "type" } -} -- | Computes the Eigen Decomposition of a batch of square self-adjoint matrices. -- -- The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions -- form square matrices, with the same constraints as the single matrix -- SelfAdjointEig. -- -- The result is a [..., M+1, M] matrix with [..., 0,:] containing the -- eigenvalues, and subsequent [...,1:, :] containing the eigenvectors. selfAdjointEig :: forall v'1 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __input__: Shape is `[..., M, M]`. -> Tensor Build t -- ^ __output__: Shape is `[..., M+1, M]`. selfAdjointEig = selfAdjointEig' id selfAdjointEig' :: forall v'1 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: Shape is `[..., M, M]`. -> Tensor Build t -- ^ __output__: Shape is `[..., M+1, M]`. selfAdjointEig' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "SelfAdjointEig" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "Shape is `[..., M, M]`." type_attr: "T" } output_arg { name: "output" description: "Shape is `[..., M+1, M]`." 
type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } } } -} -- | Computes the eigen decomposition of one or more square self-adjoint matrices. -- -- Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in -- `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. -- -- ```prettyprint -- # a is a tensor. -- # e is a tensor of eigenvalues. -- # v is a tensor of eigenvectors. -- e, v = self_adjoint_eig(a) -- e = self_adjoint_eig(a, compute_v=False) -- ``` selfAdjointEigV2 :: forall v'1 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __input__: `Tensor` input of shape `[N, N]`. -> (Tensor Build t, Tensor Build t) -- ^ (__e__, __v__) -- -- * __e__: Eigenvalues. Shape is `[N]`. -- -- * __v__: Eigenvectors. Shape is `[N, N]`. selfAdjointEigV2 = selfAdjointEigV2' id selfAdjointEigV2' :: forall v'1 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: `Tensor` input of shape `[N, N]`. -> (Tensor Build t, Tensor Build t) -- ^ (__e__, __v__) -- -- * __e__: Eigenvalues. Shape is `[N]`. -- -- * __v__: Eigenvectors. Shape is `[N, N]`. selfAdjointEigV2' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "SelfAdjointEigV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "`Tensor` input of shape `[N, N]`." type_attr: "T" } output_arg { name: "e" description: "Eigenvalues. Shape is `[N]`." type_attr: "T" } output_arg { name: "v" description: "Eigenvectors. Shape is `[N, N]`." type_attr: "T" } attr { name: "compute_v" type: "bool" default_value { b: true } description: "If `True` then eigenvectors will be computed and returned in `v`.\nOtherwise, only the eigenvalues will be computed." } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } } } -} -- | Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` string `Tensor`. -- -- The `SparseTensor` must have rank `R` greater than 1, and the first dimension -- is treated as the minibatch dimension. Elements of the `SparseTensor` -- must be sorted in increasing order of this first dimension. The serialized -- `SparseTensor` objects going into each row of `serialized_sparse` will have -- rank `R-1`. -- -- The minibatch size `N` is extracted from `sparse_shape[0]`. serializeManySparse :: forall v'1 v'2 v'3 t . (TensorType t) => Tensor v'1 Data.Int.Int64 -- ^ __sparse_indices__: 2-D. The `indices` of the minibatch `SparseTensor`. -> Tensor v'2 t -- ^ __sparse_values__: 1-D. The `values` of the minibatch `SparseTensor`. -> Tensor v'3 Data.Int.Int64 -- ^ __sparse_shape__: 1-D. The `shape` of the minibatch `SparseTensor`. -> Tensor Build Data.ByteString.ByteString -- ^ __serialized_sparse__ serializeManySparse = serializeManySparse' id serializeManySparse' :: forall v'1 v'2 v'3 t . (TensorType t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __sparse_indices__: 2-D. The `indices` of the minibatch `SparseTensor`. -> Tensor v'2 t -- ^ __sparse_values__: 1-D. The `values` of the minibatch `SparseTensor`. -> Tensor v'3 Data.Int.Int64 -- ^ __sparse_shape__: 1-D. The `shape` of the minibatch `SparseTensor`. 
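{- Editor's sketch (not generated): serializing a minibatch of two sparse
   entries drawn from a dense shape of [2, 3]; `constant` is assumed to
   come from TensorFlow.Ops.

     serialized :: Tensor Build Data.ByteString.ByteString
     serialized = serializeManySparse
         (constant (Shape [2, 2]) [0, 0, 1, 0 :: Data.Int.Int64]) -- indices
         (constant (Shape [2]) [4, 5 :: Float])                   -- values
         (constant (Shape [2]) [2, 3 :: Data.Int.Int64])          -- dense shape

   The result has shape [2, 3]: one serialized (indices, values, shape)
   string triple per minibatch row. -}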
-> Tensor Build Data.ByteString.ByteString -- ^ __serialized_sparse__ serializeManySparse' op'options sparse_indices sparse_values sparse_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sparse_indices, buildInputs sparse_values, buildInputs sparse_shape] return (opDef "SerializeManySparse" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "sparse_indices" description: "2-D. The `indices` of the minibatch `SparseTensor`." type: DT_INT64 } input_arg { name: "sparse_values" description: "1-D. The `values` of the minibatch `SparseTensor`." type_attr: "T" } input_arg { name: "sparse_shape" description: "1-D. The `shape` of the minibatch `SparseTensor`." type: DT_INT64 } output_arg { name: "serialized_sparse" type: DT_STRING } attr { name: "T" type: "type" } -} -- | Serialize a `SparseTensor` into a string 3-vector (1-D `Tensor`) object. serializeSparse :: forall v'1 v'2 v'3 t . (TensorType t) => Tensor v'1 Data.Int.Int64 -- ^ __sparse_indices__: 2-D. The `indices` of the `SparseTensor`. -> Tensor v'2 t -- ^ __sparse_values__: 1-D. The `values` of the `SparseTensor`. -> Tensor v'3 Data.Int.Int64 -- ^ __sparse_shape__: 1-D. The `shape` of the `SparseTensor`. -> Tensor Build Data.ByteString.ByteString -- ^ __serialized_sparse__ serializeSparse = serializeSparse' id serializeSparse' :: forall v'1 v'2 v'3 t . (TensorType t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __sparse_indices__: 2-D. The `indices` of the `SparseTensor`. -> Tensor v'2 t -- ^ __sparse_values__: 1-D. The `values` of the `SparseTensor`. -> Tensor v'3 Data.Int.Int64 -- ^ __sparse_shape__: 1-D. The `shape` of the `SparseTensor`. -> Tensor Build Data.ByteString.ByteString -- ^ __serialized_sparse__ serializeSparse' op'options sparse_indices sparse_values sparse_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sparse_indices, buildInputs sparse_values, buildInputs sparse_shape] return (opDef "SerializeSparse" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "sparse_indices" description: "2-D. The `indices` of the `SparseTensor`." type: DT_INT64 } input_arg { name: "sparse_values" description: "1-D. The `values` of the `SparseTensor`." type_attr: "T" } input_arg { name: "sparse_shape" description: "1-D. The `shape` of the `SparseTensor`." type: DT_INT64 } output_arg { name: "serialized_sparse" type: DT_STRING } attr { name: "T" type: "type" } -} -- | Number of unique elements along last dimension of input `set`. -- -- Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`, -- and `set_shape`. The last dimension contains values in a set, duplicates are -- allowed but ignored. -- -- If `validate_indices` is `True`, this op validates the order and range of `set` -- indices. setSize :: forall v'1 v'2 v'3 t . (OneOf '[Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8] t) => Tensor v'1 Data.Int.Int64 -- ^ __set_indices__: 2D `Tensor`, indices of a `SparseTensor`. -> Tensor v'2 t -- ^ __set_values__: 1D `Tensor`, values of a `SparseTensor`. -> Tensor v'3 Data.Int.Int64 -- ^ __set_shape__: 1D `Tensor`, shape of a `SparseTensor`. -> Tensor Build Data.Int.Int32 -- ^ __size__: For `set` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st -- `n-1` dimensions as `set`. 
Each value is the number of unique elements in -- the corresponding `[0...n-1]` dimension of `set`. setSize = setSize' id setSize' :: forall v'1 v'2 v'3 t . (OneOf '[Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8] t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __set_indices__: 2D `Tensor`, indices of a `SparseTensor`. -> Tensor v'2 t -- ^ __set_values__: 1D `Tensor`, values of a `SparseTensor`. -> Tensor v'3 Data.Int.Int64 -- ^ __set_shape__: 1D `Tensor`, shape of a `SparseTensor`. -> Tensor Build Data.Int.Int32 -- ^ __size__: For `set` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st -- `n-1` dimensions as `set`. Each value is the number of unique elements in -- the corresponding `[0...n-1]` dimension of `set`. setSize' op'options set_indices set_values set_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs set_indices, buildInputs set_values, buildInputs set_shape] return (opDef "SetSize" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "set_indices" description: "2D `Tensor`, indices of a `SparseTensor`." type: DT_INT64 } input_arg { name: "set_values" description: "1D `Tensor`, values of a `SparseTensor`." type_attr: "T" } input_arg { name: "set_shape" description: "1D `Tensor`, shape of a `SparseTensor`." type: DT_INT64 } output_arg { name: "size" description: "For `set` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st\n`n-1` dimensions as `set`. Each value is the number of unique elements in\nthe corresponding `[0...n-1]` dimension of `set`." type: DT_INT32 } attr { name: "validate_indices" type: "bool" default_value { b: true } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_UINT16 type: DT_STRING } } } -} -- | Returns the shape of a tensor. -- -- This operation returns a 1-D integer tensor representing the shape of `input`. -- -- For example: -- -- ```prettyprint -- # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] -- shape(t) ==> [2, 2, 3] -- ``` shape :: forall v'1 t out_type . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_type) => Tensor v'1 t -- ^ __input__ -> Tensor Build out_type -- ^ __output__ shape = shape' id shape' :: forall v'1 t out_type . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_type) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build out_type -- ^ __output__ shape' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Shape" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "out_type" } attr { name: "T" type: "type" } attr { name: "out_type" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Returns the shapes of tensors. -- -- This operation returns N 1-D integer tensors representing the shape of each `input[i]`. shapeN :: forall v'1 t out_type . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_type) => [Tensor v'1 t] -- ^ __input__ -> [Tensor Build out_type] -- ^ __output__ shapeN = shapeN' id shapeN' :: forall v'1 t out_type . 
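{- Editor's sketch (not generated): `shape` and `shapeN` only inspect their
   arguments, so they are pure ops; `constant` is assumed from TensorFlow.Ops.

     shapes :: [Tensor Build Data.Int.Int32]
     shapes = shapeN [ constant (Shape [2, 3]) (replicate 6 (0 :: Float))
                     , constant (Shape [4]) (replicate 4 (0 :: Float)) ]

   Fetching the two outputs should yield [2, 3] and [4]. -}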
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_type) => OpParams -> [Tensor v'1 t] -- ^ __input__ -> [Tensor Build out_type] -- ^ __output__ shapeN' op'options input | eqLengthGuard [("N", [("input", length input)])] = pureOp [n] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "ShapeN" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "out_type" .~ tensorType (undefined :: out_type) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length input) :: Int64 {- input_arg { name: "input" type_attr: "T" number_attr: "N" } output_arg { name: "output" type_attr: "out_type" number_attr: "N" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } attr { name: "out_type" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Generate a sharded filename. The filename is printf formatted as -- -- %s-%05d-of-%05d, basename, shard, num_shards. shardedFilename :: Tensor v'1 Data.ByteString.ByteString -- ^ __basename__ -> Tensor v'2 Data.Int.Int32 -- ^ __shard__ -> Tensor v'3 Data.Int.Int32 -- ^ __num_shards__ -> Tensor Build Data.ByteString.ByteString -- ^ __filename__ shardedFilename = shardedFilename' id shardedFilename' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __basename__ -> Tensor v'2 Data.Int.Int32 -- ^ __shard__ -> Tensor v'3 Data.Int.Int32 -- ^ __num_shards__ -> Tensor Build Data.ByteString.ByteString -- ^ __filename__ shardedFilename' op'options basename shard num_shards | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs basename, buildInputs shard, buildInputs num_shards] return (opDef "ShardedFilename" & op'options & opInputs .~ op'inputs) {- input_arg { name: "basename" type: DT_STRING } input_arg { name: "shard" type: DT_INT32 } input_arg { name: "num_shards" type: DT_INT32 } output_arg { name: "filename" type: DT_STRING } -} -- | Generate a glob pattern matching all sharded file names. shardedFilespec :: Tensor v'1 Data.ByteString.ByteString -- ^ __basename__ -> Tensor v'2 Data.Int.Int32 -- ^ __num_shards__ -> Tensor Build Data.ByteString.ByteString -- ^ __filename__ shardedFilespec = shardedFilespec' id shardedFilespec' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __basename__ -> Tensor v'2 Data.Int.Int32 -- ^ __num_shards__ -> Tensor Build Data.ByteString.ByteString -- ^ __filename__ shardedFilespec' op'options basename num_shards | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs basename, buildInputs num_shards] return (opDef "ShardedFilespec" & op'options & opInputs .~ op'inputs) {- input_arg { name: "basename" type: DT_STRING } input_arg { name: "num_shards" type: DT_INT32 } output_arg { name: "filename" type: DT_STRING } -} -- | Computes sigmoid of `x` element-wise. -- -- Specifically, `y = 1 / (1 + exp(-x))`. sigmoid :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ sigmoid = sigmoid' id sigmoid' :: forall v'1 t . 
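{- Editor's sketch (not generated): the logistic function maps 0 to 0.5, so,
   with `constant` assumed from TensorFlow.Ops:

     halves :: Tensor Build Float
     halves = sigmoid (constant (Shape [3]) [0, 0, 0 :: Float])

   fetches to [0.5, 0.5, 0.5]. -}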
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ sigmoid' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Sigmoid" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Computes the gradient of the sigmoid of `x` wrt its input. -- -- Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and -- `dy` is the corresponding input gradient. sigmoidGrad :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ sigmoidGrad = sigmoidGrad' id sigmoidGrad' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ sigmoidGrad' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "SigmoidGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Returns an element-wise indication of the sign of a number. -- -- `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`. -- -- For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`. sign :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ sign = sign' id sign' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ sign' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Sign" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Computes sin of x element-wise. sin :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ sin = sin' id sin' :: forall v'1 t . 
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ sin' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Sin" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Returns the size of a tensor. -- -- This operation returns an integer representing the number of elements in -- `input`. -- -- For example: -- -- ```prettyprint -- # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] -- size(t) ==> 12 -- ``` size :: forall v'1 t out_type . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_type) => Tensor v'1 t -- ^ __input__ -> Tensor Build out_type -- ^ __output__ size = size' id size' :: forall v'1 t out_type . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_type) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build out_type -- ^ __output__ size' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Size" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "out_type" } attr { name: "T" type: "type" } attr { name: "out_type" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Parses a text file and creates a batch of examples. skipgram :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __batch_size__: The size of produced batch. -> m' ((Tensor Value Data.ByteString.ByteString, Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int64, Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int64, Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int32)) -- ^ (__vocab_word__, __vocab_freq__, __words_per_epoch__, __current_epoch__, __total_words_processed__, __examples__, __labels__) -- -- * __vocab_word__: A vector of words in the corpus. -- -- * __vocab_freq__: Frequencies of words. Sorted in the non-ascending order. -- -- * __words_per_epoch__: Number of words per epoch in the data file. -- -- * __current_epoch__: The current epoch number. -- -- * __total_words_processed__: The total number of words processed so far. -- -- * __examples__: A vector of word ids. -- -- * __labels__: A vector of word ids. skipgram = skipgram' id skipgram' :: forall m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __batch_size__: The size of produced batch. -> m' ((Tensor Value Data.ByteString.ByteString, Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int64, Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int64, Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int32)) -- ^ (__vocab_word__, __vocab_freq__, __words_per_epoch__, __current_epoch__, __total_words_processed__, __examples__, __labels__) -- -- * __vocab_word__: A vector of words in the corpus. -- -- * __vocab_freq__: Frequencies of words. Sorted in the non-ascending order. -- -- * __words_per_epoch__: Number of words per epoch in the data file. -- -- * __current_epoch__: The current epoch number. 
-- -- * __total_words_processed__: The total number of words processed so far. -- -- * __examples__: A vector of word ids. -- -- * __labels__: A vector of word ids. skipgram' op'options batch_size | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "Skipgram" & opAttr "batch_size" .~ batch_size & op'options & opInputs .~ op'inputs) {- output_arg { name: "vocab_word" description: "A vector of words in the corpus." type: DT_STRING } output_arg { name: "vocab_freq" description: "Frequencies of words. Sorted in the non-ascending order." type: DT_INT32 } output_arg { name: "words_per_epoch" description: "Number of words per epoch in the data file." type: DT_INT64 } output_arg { name: "current_epoch" description: "The current epoch number." type: DT_INT32 } output_arg { name: "total_words_processed" description: "The total number of words processed so far." type: DT_INT64 } output_arg { name: "examples" description: "A vector of word ids." type: DT_INT32 } output_arg { name: "labels" description: "A vector of word ids." type: DT_INT32 } attr { name: "filename" type: "string" description: "The corpus\'s text file name." } attr { name: "batch_size" type: "int" description: "The size of produced batch." } attr { name: "window_size" type: "int" default_value { i: 5 } description: "The number of words to predict to the left and right of the target." } attr { name: "min_count" type: "int" default_value { i: 5 } description: "The minimum number of word occurrences for it to be included in the\nvocabulary." } attr { name: "subsample" type: "float" default_value { f: 1.0e-3 } description: "Threshold for word occurrence. Words that appear with higher\nfrequency will be randomly down-sampled. Set to 0 to disable." } -} -- | Return a slice from 'input'. -- -- The output tensor is a tensor with dimensions described by 'size' -- whose values are extracted from 'input' starting at the offsets in -- 'begin'. -- -- *Requirements*: -- 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) slice :: forall v'1 v'2 v'3 t index . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] index) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 index -- ^ __begin__: begin[i] specifies the offset into the 'i'th dimension of -- 'input' to slice from. -> Tensor v'3 index -- ^ __size__: size[i] specifies the number of elements of the 'i'th dimension -- of 'input' to slice. If size[i] is -1, all remaining elements in dimension -- i are included in the slice (i.e. this is equivalent to setting -- size[i] = input.dim_size(i) - begin[i]). -> Tensor Build t -- ^ __output__ slice = slice' id slice' :: forall v'1 v'2 v'3 t index . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] index) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 index -- ^ __begin__: begin[i] specifies the offset into the 'i'th dimension of -- 'input' to slice from. -> Tensor v'3 index -- ^ __size__: size[i] specifies the number of elements of the 'i'th dimension -- of 'input' to slice. If size[i] is -1, all remaining elements in dimension -- i are included in the slice (i.e. this is equivalent to setting -- size[i] = input.dim_size(i) - begin[i]). 
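{- Editor's sketch (not generated): selecting the last two columns of a
   2x3 matrix, using the size[i] = -1 convention to keep every row;
   `constant` is assumed from TensorFlow.Ops.

     lastTwoCols :: Tensor Build Float
     lastTwoCols = slice
         (constant (Shape [2, 3]) [1, 2, 3, 4, 5, 6 :: Float]) -- input
         (constant (Shape [2]) [0, 1 :: Data.Int.Int32])       -- begin
         (constant (Shape [2]) [-1, 2 :: Data.Int.Int32])      -- size

   The result is [[2, 3], [5, 6]]. -}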
-> Tensor Build t -- ^ __output__ slice' op'options input begin size | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs begin, buildInputs size] return (opDef "Slice" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Index" .~ tensorType (undefined :: index) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "begin" description: "begin[i] specifies the offset into the \'i\'th dimension of\n\'input\' to slice from." type_attr: "Index" } input_arg { name: "size" description: "size[i] specifies the number of elements of the \'i\'th dimension\nof \'input\' to slice. If size[i] is -1, all remaining elements in dimension\ni are included in the slice (i.e. this is equivalent to setting\nsize[i] = input.dim_size(i) - begin[i])." type_attr: "Index" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Index" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Computes softmax activations. -- -- For each batch `i` and class `j` we have -- -- softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j])) softmax :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __logits__: 2-D with shape `[batch_size, num_classes]`. -> Tensor Build t -- ^ __softmax__: Same shape as `logits`. softmax = softmax' id softmax' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __logits__: 2-D with shape `[batch_size, num_classes]`. -> Tensor Build t -- ^ __softmax__: Same shape as `logits`. softmax' op'options logits | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs logits] return (opDef "Softmax" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "logits" description: "2-D with shape `[batch_size, num_classes]`." type_attr: "T" } output_arg { name: "softmax" description: "Same shape as `logits`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Computes softmax cross entropy cost and gradients to backpropagate. -- -- Inputs are the logits, not probabilities. softmaxCrossEntropyWithLogits :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __features__: batch_size x num_classes matrix -> Tensor v'2 t -- ^ __labels__: batch_size x num_classes matrix -- The caller must ensure that each batch of labels represents a valid -- probability distribution. -> (Tensor Build t, Tensor Build t) -- ^ (__loss__, __backprop__) -- -- * __loss__: Per example loss (batch_size vector). -- -- * __backprop__: backpropagated gradients (batch_size x num_classes matrix). softmaxCrossEntropyWithLogits = softmaxCrossEntropyWithLogits' id softmaxCrossEntropyWithLogits' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __features__: batch_size x num_classes matrix -> Tensor v'2 t -- ^ __labels__: batch_size x num_classes matrix -- The caller must ensure that each batch of labels represents a valid -- probability distribution. -> (Tensor Build t, Tensor Build t) -- ^ (__loss__, __backprop__) -- -- * __loss__: Per example loss (batch_size vector). -- -- * __backprop__: backpropagated gradients (batch_size x num_classes matrix). 
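{- Editor's sketch (not generated): one example with three classes; each
   labels row must form a valid probability distribution. `constant` is
   assumed from TensorFlow.Ops.

     (loss, backprop) = softmaxCrossEntropyWithLogits
         (constant (Shape [1, 3]) [2, 1, 0.1 :: Float]) -- logits
         (constant (Shape [1, 3]) [1, 0, 0 :: Float])   -- one-hot labels

   `loss` has shape [1] (per-example loss) and `backprop` has shape [1, 3],
   i.e. softmax(logits) - labels. -}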
softmaxCrossEntropyWithLogits' op'options features labels | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features, buildInputs labels] return (opDef "SoftmaxCrossEntropyWithLogits" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "features" description: "batch_size x num_classes matrix" type_attr: "T" } input_arg { name: "labels" description: "batch_size x num_classes matrix\nThe caller must ensure that each batch of labels represents a valid\nprobability distribution." type_attr: "T" } output_arg { name: "loss" description: "Per example loss (batch_size vector)." type_attr: "T" } output_arg { name: "backprop" description: "backpropagated gradients (batch_size x num_classes matrix)." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Computes softplus: `log(exp(features) + 1)`. softplus :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __features__ -> Tensor Build t -- ^ __activations__ softplus = softplus' id softplus' :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __features__ -> Tensor Build t -- ^ __activations__ softplus' op'options features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features] return (opDef "Softplus" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "features" type_attr: "T" } output_arg { name: "activations" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } -} -- | Computes softplus gradients for a softplus operation. softplusGrad :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __gradients__: The backpropagated gradients to the corresponding softplus operation. -> Tensor v'2 t -- ^ __features__: The features passed as input to the corresponding softplus operation. -> Tensor Build t -- ^ __backprops__: The gradients: `gradients / (1 + exp(-features))`. softplusGrad = softplusGrad' id softplusGrad' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __gradients__: The backpropagated gradients to the corresponding softplus operation. -> Tensor v'2 t -- ^ __features__: The features passed as input to the corresponding softplus operation. -> Tensor Build t -- ^ __backprops__: The gradients: `gradients / (1 + exp(-features))`. softplusGrad' op'options gradients features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients, buildInputs features] return (opDef "SoftplusGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "gradients" description: "The backpropagated gradients to the corresponding softplus operation." 
type_attr: "T" } input_arg { name: "features" description: "The features passed as input to the corresponding softplus operation." type_attr: "T" } output_arg { name: "backprops" description: "The gradients: `gradients / (1 + exp(-features))`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } -} -- | Computes softsign: `features / (abs(features) + 1)`. softsign :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __features__ -> Tensor Build t -- ^ __activations__ softsign = softsign' id softsign' :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __features__ -> Tensor Build t -- ^ __activations__ softsign' op'options features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features] return (opDef "Softsign" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "features" type_attr: "T" } output_arg { name: "activations" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } -} -- | Computes softsign gradients for a softsign operation. softsignGrad :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __gradients__: The backpropagated gradients to the corresponding softsign operation. -> Tensor v'2 t -- ^ __features__: The features passed as input to the corresponding softsign operation. -> Tensor Build t -- ^ __backprops__: The gradients: `gradients / (1 + abs(-features)) ** 2`. softsignGrad = softsignGrad' id softsignGrad' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __gradients__: The backpropagated gradients to the corresponding softsign operation. -> Tensor v'2 t -- ^ __features__: The features passed as input to the corresponding softsign operation. -> Tensor Build t -- ^ __backprops__: The gradients: `gradients / (1 + abs(-features)) ** 2`. softsignGrad' op'options gradients features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients, buildInputs features] return (opDef "SoftsignGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "gradients" description: "The backpropagated gradients to the corresponding softsign operation." type_attr: "T" } input_arg { name: "features" description: "The features passed as input to the corresponding softsign operation." type_attr: "T" } output_arg { name: "backprops" description: "The gradients: `gradients / (1 + abs(-features)) ** 2`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } -} -- | SpaceToBatch for 4-D tensors of type T. 
-- -- This is a legacy version of the more general SpaceToBatchND. -- -- Zero-pads and then rearranges (permutes) blocks of spatial data into batch. -- More specifically, this op outputs a copy of the input tensor where values from -- the `height` and `width` dimensions are moved to the `batch` dimension. After -- the zero-padding, both `height` and `width` of the input must be divisible by the -- block size. spaceToBatch :: forall v'1 v'2 t tpaddings . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tpaddings) => Data.Int.Int64 -- ^ __block_size__ -> Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, height, width, depth]`. -> Tensor v'2 tpaddings -- ^ __paddings__: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies -- the padding of the input with zeros across the spatial dimensions as follows: -- -- paddings = [[pad_top, pad_bottom], [pad_left, pad_right]] -- -- The effective spatial dimensions of the zero-padded input tensor will be: -- -- height_pad = pad_top + height + pad_bottom -- width_pad = pad_left + width + pad_right -- -- The attr `block_size` must be greater than one. It indicates the block size. -- -- * Non-overlapping blocks of size `block_size x block size` in the height and -- width dimensions are rearranged into the batch dimension at each location. -- * The batch of the output tensor is `batch * block_size * block_size`. -- * Both height_pad and width_pad must be divisible by block_size. -- -- The shape of the output will be: -- -- [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, -- depth] -- -- Some examples: -- -- (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2: -- -- ```prettyprint -- x = [[[[1], [2]], [[3], [4]]]] -- ``` -- -- The output tensor has shape `[4, 1, 1, 1]` and value: -- -- ```prettyprint -- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] -- ``` -- -- (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2: -- -- ```prettyprint -- x = [[[[1, 2, 3], [4, 5, 6]], -- [[7, 8, 9], [10, 11, 12]]]] -- ``` -- -- The output tensor has shape `[4, 1, 1, 3]` and value: -- -- ```prettyprint -- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] -- ``` -- -- (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2: -- -- ```prettyprint -- x = [[[[1], [2], [3], [4]], -- [[5], [6], [7], [8]], -- [[9], [10], [11], [12]], -- [[13], [14], [15], [16]]]] -- ``` -- -- The output tensor has shape `[4, 2, 2, 1]` and value: -- -- ```prettyprint -- x = [[[[1], [3]], [[9], [11]]], -- [[[2], [4]], [[10], [12]]], -- [[[5], [7]], [[13], [15]]], -- [[[6], [8]], [[14], [16]]]] -- ``` -- -- (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2: -- -- ```prettyprint -- x = [[[[1], [2], [3], [4]], -- [[5], [6], [7], [8]]], -- [[[9], [10], [11], [12]], -- [[13], [14], [15], [16]]]] -- ``` -- -- The output tensor has shape `[8, 1, 2, 1]` and value: -- -- ```prettyprint -- x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], -- [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] -- ``` -- -- Among others, this operation is useful for reducing atrous convolution into -- regular convolution. -> Tensor Build t -- ^ __output__ spaceToBatch = spaceToBatch' id spaceToBatch' :: forall v'1 v'2 t tpaddings . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tpaddings) => OpParams -> Data.Int.Int64 -- ^ __block_size__ -> Tensor v'1 t -- ^ __input__: 4-D with shape `[batch, height, width, depth]`. 
-> Tensor v'2 tpaddings -- ^ __paddings__: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies -- the padding of the input with zeros across the spatial dimensions as follows: -- -- paddings = [[pad_top, pad_bottom], [pad_left, pad_right]] -- -- The effective spatial dimensions of the zero-padded input tensor will be: -- -- height_pad = pad_top + height + pad_bottom -- width_pad = pad_left + width + pad_right -- -- The attr `block_size` must be greater than one. It indicates the block size. -- -- * Non-overlapping blocks of size `block_size x block size` in the height and -- width dimensions are rearranged into the batch dimension at each location. -- * The batch of the output tensor is `batch * block_size * block_size`. -- * Both height_pad and width_pad must be divisible by block_size. -- -- The shape of the output will be: -- -- [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, -- depth] -- -- Some examples: -- -- (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2: -- -- ```prettyprint -- x = [[[[1], [2]], [[3], [4]]]] -- ``` -- -- The output tensor has shape `[4, 1, 1, 1]` and value: -- -- ```prettyprint -- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] -- ``` -- -- (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2: -- -- ```prettyprint -- x = [[[[1, 2, 3], [4, 5, 6]], -- [[7, 8, 9], [10, 11, 12]]]] -- ``` -- -- The output tensor has shape `[4, 1, 1, 3]` and value: -- -- ```prettyprint -- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] -- ``` -- -- (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2: -- -- ```prettyprint -- x = [[[[1], [2], [3], [4]], -- [[5], [6], [7], [8]], -- [[9], [10], [11], [12]], -- [[13], [14], [15], [16]]]] -- ``` -- -- The output tensor has shape `[4, 2, 2, 1]` and value: -- -- ```prettyprint -- x = [[[[1], [3]], [[9], [11]]], -- [[[2], [4]], [[10], [12]]], -- [[[5], [7]], [[13], [15]]], -- [[[6], [8]], [[14], [16]]]] -- ``` -- -- (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2: -- -- ```prettyprint -- x = [[[[1], [2], [3], [4]], -- [[5], [6], [7], [8]]], -- [[[9], [10], [11], [12]], -- [[13], [14], [15], [16]]]] -- ``` -- -- The output tensor has shape `[8, 1, 2, 1]` and value: -- -- ```prettyprint -- x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], -- [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] -- ``` -- -- Among others, this operation is useful for reducing atrous convolution into -- regular convolution. -> Tensor Build t -- ^ __output__ spaceToBatch' op'options block_size input paddings | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs paddings] return (opDef "SpaceToBatch" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings) & opAttr "block_size" .~ block_size & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "4-D with shape `[batch, height, width, depth]`." type_attr: "T" } input_arg { name: "paddings" description: "2-D tensor of non-negative integers with shape `[2, 2]`. It specifies\n the padding of the input with zeros across the spatial dimensions as follows:\n\n paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]\n\n The effective spatial dimensions of the zero-padded input tensor will be:\n\n height_pad = pad_top + height + pad_bottom\n width_pad = pad_left + width + pad_right\n\nThe attr `block_size` must be greater than one. 
It indicates the block size.\n\n * Non-overlapping blocks of size `block_size x block size` in the height and\n width dimensions are rearranged into the batch dimension at each location.\n * The batch of the output tensor is `batch * block_size * block_size`.\n * Both height_pad and width_pad must be divisible by block_size.\n\nThe shape of the output will be:\n\n [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,\n depth]\n\nSome examples:\n\n(1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:\n\n```prettyprint\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 1]` and value:\n\n```prettyprint\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\n(2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:\n\n```prettyprint\nx = [[[[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 3]` and value:\n\n```prettyprint\n[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\n```\n\n(3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:\n\n```prettyprint\nx = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n```\n\nThe output tensor has shape `[4, 2, 2, 1]` and value:\n\n```prettyprint\nx = [[[[1], [3]], [[9], [11]]],\n [[[2], [4]], [[10], [12]]],\n [[[5], [7]], [[13], [15]]],\n [[[6], [8]], [[14], [16]]]]\n```\n\n(4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:\n\n```prettyprint\nx = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]]],\n [[[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n```\n\nThe output tensor has shape `[8, 1, 2, 1]` and value:\n\n```prettyprint\nx = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],\n [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]\n```\n\nAmong others, this operation is useful for reducing atrous convolution into\nregular convolution." type_attr: "Tpaddings" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tpaddings" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "block_size" type: "int" has_minimum: true minimum: 2 } -} -- | SpaceToBatch for N-D tensors of type T. -- -- This operation divides "spatial" dimensions `[1, ..., M]` of the input into a -- grid of blocks of shape `block_shape`, and interleaves these blocks with the -- "batch" dimension (0) such that in the output, the spatial dimensions -- `[1, ..., M]` correspond to the position within the grid, and the batch -- dimension combines both the position within a spatial block and the original -- batch position. Prior to division into blocks, the spatial dimensions of the -- input are optionally zero padded according to `paddings`. See below for a -- precise description. spaceToBatchND :: forall v'1 v'2 v'3 t tblock_shape tpaddings . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tblock_shape, OneOf '[Data.Int.Int32, Data.Int.Int64] tpaddings) => Tensor v'1 t -- ^ __input__: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, -- where spatial_shape has `M` dimensions. -> Tensor v'2 tblock_shape -- ^ __block_shape__: 1-D with shape `[M]`, all values must be >= 1. -> Tensor v'3 tpaddings -- ^ __paddings__: 2-D with shape `[M, 2]`, all values must be >= 0. -- `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension -- `i + 1`, which corresponds to spatial dimension `i`. 
It is required that -- `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`. -- -- This operation is equivalent to the following steps: -- -- 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the -- input according to `paddings` to produce `padded` of shape `padded_shape`. -- -- 2. Reshape `padded` to `reshaped_padded` of shape: -- -- [batch] + -- [padded_shape[1] / block_shape[0], -- block_shape[0], -- ..., -- padded_shape[M] / block_shape[M-1], -- block_shape[M-1]] + -- remaining_shape -- -- 3. Permute dimensions of `reshaped_padded` to produce -- `permuted_reshaped_padded` of shape: -- -- block_shape + -- [batch] + -- [padded_shape[1] / block_shape[0], -- ..., -- padded_shape[M] / block_shape[M-1]] + -- remaining_shape -- -- 4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch -- dimension, producing an output tensor of shape: -- -- [batch * prod(block_shape)] + -- [padded_shape[1] / block_shape[0], -- ..., -- padded_shape[M] / block_shape[M-1]] + -- remaining_shape -- -- Some examples: -- -- (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and -- `paddings = [[0, 0], [0, 0]]`: -- -- ```prettyprint -- x = [[[[1], [2]], [[3], [4]]]] -- ``` -- -- The output tensor has shape `[4, 1, 1, 1]` and value: -- -- ```prettyprint -- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] -- ``` -- -- (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and -- `paddings = [[0, 0], [0, 0]]`: -- -- ```prettyprint -- x = [[[[1, 2, 3], [4, 5, 6]], -- [[7, 8, 9], [10, 11, 12]]]] -- ``` -- -- The output tensor has shape `[4, 1, 1, 3]` and value: -- -- ```prettyprint -- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] -- ``` -- -- (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and -- `paddings = [[0, 0], [0, 0]]`: -- -- ```prettyprint -- x = [[[[1], [2], [3], [4]], -- [[5], [6], [7], [8]], -- [[9], [10], [11], [12]], -- [[13], [14], [15], [16]]]] -- ``` -- -- The output tensor has shape `[4, 2, 2, 1]` and value: -- -- ```prettyprint -- x = [[[[1], [3]], [[9], [11]]], -- [[[2], [4]], [[10], [12]]], -- [[[5], [7]], [[13], [15]]], -- [[[6], [8]], [[14], [16]]]] -- ``` -- -- (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and -- paddings = `[[0, 0], [2, 0]]`: -- -- ```prettyprint -- x = [[[[1], [2], [3], [4]], -- [[5], [6], [7], [8]]], -- [[[9], [10], [11], [12]], -- [[13], [14], [15], [16]]]] -- ``` -- -- The output tensor has shape `[8, 1, 3, 1]` and value: -- -- ```prettyprint -- x = [[[[0], [1], [3]]], [[[0], [9], [11]]], -- [[[0], [2], [4]]], [[[0], [10], [12]]], -- [[[0], [5], [7]]], [[[0], [13], [15]]], -- [[[0], [6], [8]]], [[[0], [14], [16]]]] -- ``` -- -- Among others, this operation is useful for reducing atrous convolution into -- regular convolution. -> Tensor Build t -- ^ __output__ spaceToBatchND = spaceToBatchND' id spaceToBatchND' :: forall v'1 v'2 v'3 t tblock_shape tpaddings . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tblock_shape, OneOf '[Data.Int.Int32, Data.Int.Int64] tpaddings) => OpParams -> Tensor v'1 t -- ^ __input__: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, -- where spatial_shape has `M` dimensions. -> Tensor v'2 tblock_shape -- ^ __block_shape__: 1-D with shape `[M]`, all values must be >= 1. -> Tensor v'3 tpaddings -- ^ __paddings__: 2-D with shape `[M, 2]`, all values must be >= 0. 
-- `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension -- `i + 1`, which corresponds to spatial dimension `i`. It is required that -- `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`. -- -- This operation is equivalent to the following steps: -- -- 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the -- input according to `paddings` to produce `padded` of shape `padded_shape`. -- -- 2. Reshape `padded` to `reshaped_padded` of shape: -- -- [batch] + -- [padded_shape[1] / block_shape[0], -- block_shape[0], -- ..., -- padded_shape[M] / block_shape[M-1], -- block_shape[M-1]] + -- remaining_shape -- -- 3. Permute dimensions of `reshaped_padded` to produce -- `permuted_reshaped_padded` of shape: -- -- block_shape + -- [batch] + -- [padded_shape[1] / block_shape[0], -- ..., -- padded_shape[M] / block_shape[M-1]] + -- remaining_shape -- -- 4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch -- dimension, producing an output tensor of shape: -- -- [batch * prod(block_shape)] + -- [padded_shape[1] / block_shape[0], -- ..., -- padded_shape[M] / block_shape[M-1]] + -- remaining_shape -- -- Some examples: -- -- (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and -- `paddings = [[0, 0], [0, 0]]`: -- -- ```prettyprint -- x = [[[[1], [2]], [[3], [4]]]] -- ``` -- -- The output tensor has shape `[4, 1, 1, 1]` and value: -- -- ```prettyprint -- [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] -- ``` -- -- (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and -- `paddings = [[0, 0], [0, 0]]`: -- -- ```prettyprint -- x = [[[[1, 2, 3], [4, 5, 6]], -- [[7, 8, 9], [10, 11, 12]]]] -- ``` -- -- The output tensor has shape `[4, 1, 1, 3]` and value: -- -- ```prettyprint -- [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]] -- ``` -- -- (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and -- `paddings = [[0, 0], [0, 0]]`: -- -- ```prettyprint -- x = [[[[1], [2], [3], [4]], -- [[5], [6], [7], [8]], -- [[9], [10], [11], [12]], -- [[13], [14], [15], [16]]]] -- ``` -- -- The output tensor has shape `[4, 2, 2, 1]` and value: -- -- ```prettyprint -- x = [[[[1], [3]], [[9], [11]]], -- [[[2], [4]], [[10], [12]]], -- [[[5], [7]], [[13], [15]]], -- [[[6], [8]], [[14], [16]]]] -- ``` -- -- (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and -- paddings = `[[0, 0], [2, 0]]`: -- -- ```prettyprint -- x = [[[[1], [2], [3], [4]], -- [[5], [6], [7], [8]]], -- [[[9], [10], [11], [12]], -- [[13], [14], [15], [16]]]] -- ``` -- -- The output tensor has shape `[8, 1, 3, 1]` and value: -- -- ```prettyprint -- x = [[[[0], [1], [3]]], [[[0], [9], [11]]], -- [[[0], [2], [4]]], [[[0], [10], [12]]], -- [[[0], [5], [7]]], [[[0], [13], [15]]], -- [[[0], [6], [8]]], [[[0], [14], [16]]]] -- ``` -- -- Among others, this operation is useful for reducing atrous convolution into -- regular convolution. 
-> Tensor Build t -- ^ __output__ spaceToBatchND' op'options input block_shape paddings | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs block_shape, buildInputs paddings] return (opDef "SpaceToBatchND" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tblock_shape" .~ tensorType (undefined :: tblock_shape) & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,\nwhere spatial_shape has `M` dimensions." type_attr: "T" } input_arg { name: "block_shape" description: "1-D with shape `[M]`, all values must be >= 1." type_attr: "Tblock_shape" } input_arg { name: "paddings" description: "2-D with shape `[M, 2]`, all values must be >= 0.\n `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension\n `i + 1`, which corresponds to spatial dimension `i`. It is required that\n `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.\n\nThis operation is equivalent to the following steps:\n\n1. Zero-pad the start and end of dimensions `[1, ..., M]` of the\n input according to `paddings` to produce `padded` of shape `padded_shape`.\n\n2. Reshape `padded` to `reshaped_padded` of shape:\n\n [batch] +\n [padded_shape[1] / block_shape[0],\n block_shape[0],\n ...,\n padded_shape[M] / block_shape[M-1],\n block_shape[M-1]] +\n remaining_shape\n\n3. Permute dimensions of `reshaped_padded` to produce\n `permuted_reshaped_padded` of shape:\n\n block_shape +\n [batch] +\n [padded_shape[1] / block_shape[0],\n ...,\n padded_shape[M] / block_shape[M-1]] +\n remaining_shape\n\n4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch\n dimension, producing an output tensor of shape:\n\n [batch * prod(block_shape)] +\n [padded_shape[1] / block_shape[0],\n ...,\n padded_shape[M] / block_shape[M-1]] +\n remaining_shape\n\nSome examples:\n\n(1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and\n `paddings = [[0, 0], [0, 0]]`:\n\n```prettyprint\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 1]` and value:\n\n```prettyprint\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\n(2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and\n `paddings = [[0, 0], [0, 0]]`:\n\n```prettyprint\nx = [[[[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 3]` and value:\n\n```prettyprint\n[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\n```\n\n(3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and\n `paddings = [[0, 0], [0, 0]]`:\n\n```prettyprint\nx = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n```\n\nThe output tensor has shape `[4, 2, 2, 1]` and value:\n\n```prettyprint\nx = [[[[1], [3]], [[9], [11]]],\n [[[2], [4]], [[10], [12]]],\n [[[5], [7]], [[13], [15]]],\n [[[6], [8]], [[14], [16]]]]\n```\n\n(4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and\n paddings = `[[0, 0], [2, 0]]`:\n\n```prettyprint\nx = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]]],\n [[[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n```\n\nThe output tensor has shape `[8, 1, 3, 1]` and value:\n\n```prettyprint\nx = [[[[0], [1], [3]]], [[[0], [9], [11]]],\n [[[0], [2], [4]]], [[[0], [10], [12]]],\n [[[0], [5], [7]]], [[[0], [13], 
[15]]],\n [[[0], [6], [8]]], [[[0], [14], [16]]]]\n```\n\nAmong others, this operation is useful for reducing atrous convolution into\nregular convolution." type_attr: "Tpaddings" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tblock_shape" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "Tpaddings" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | SpaceToDepth for tensors of type T. -- -- Rearranges blocks of spatial data into depth. More specifically, -- this op outputs a copy of the input tensor where values from the `height` -- and `width` dimensions are moved to the `depth` dimension. -- The attr `block_size` indicates the input block size and how the data is moved. -- -- * Non-overlapping blocks of size `block_size x block_size` are rearranged -- into depth at each location. -- * The depth of the output tensor is `input_depth * block_size * block_size`. -- * The input tensor's height and width must be divisible by block_size. -- -- That is, assuming the input is in the shape: -- `[batch, height, width, depth]`, -- the shape of the output will be: -- `[batch, height/block_size, width/block_size, depth*block_size*block_size]` -- -- This operation requires that the input tensor be of rank 4, and that -- `block_size` be >= 2 and a divisor of both the input `height` and `width`. -- -- This operation is useful for resizing the activations between convolutions -- (but keeping all data), e.g. instead of pooling. It is also useful for training -- purely convolutional models. -- -- For example, given this input of shape `[1, 2, 2, 1]` and block_size of 2: -- -- ```prettyprint -- x = [[[[1], [2]], -- [[3], [4]]]] -- ``` -- -- This operation will output a tensor of shape `[1, 1, 1, 4]`: -- -- ```prettyprint -- [[[[1, 2, 3, 4]]]] -- ``` -- -- Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`, -- the corresponding output will have a single element (i.e. width and height are -- both 1) and will have a depth of 4 channels (1 * block_size * block_size). -- The output element shape is `[1, 1, 4]`. -- -- For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g. -- -- ```prettyprint -- x = [[[[1, 2, 3], [4, 5, 6]], -- [[7, 8, 9], [10, 11, 12]]]] -- ``` -- -- This operation, for block_size of 2, will return the following tensor of shape -- `[1, 1, 1, 12]`: -- -- ```prettyprint -- [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] -- ``` -- -- Similarly, for the following input of shape `[1, 4, 4, 1]` and a block_size of 2: -- -- ```prettyprint -- x = [[[[1], [2], [5], [6]], -- [[3], [4], [7], [8]], -- [[9], [10], [13], [14]], -- [[11], [12], [15], [16]]]] -- ``` -- -- the operator will return the following tensor of shape `[1, 2, 2, 4]`: -- -- ```prettyprint -- x = [[[[1, 2, 3, 4], -- [5, 6, 7, 8]], -- [[9, 10, 11, 12], -- [13, 14, 15, 16]]]] -- ``` spaceToDepth :: forall v'1 t . (TensorType t) => Data.Int.Int64 -- ^ __block_size__: The size of the spatial block. -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ spaceToDepth = spaceToDepth' id spaceToDepth' :: forall v'1 t . (TensorType t) => OpParams -> Data.Int.Int64 -- ^ __block_size__: The size of the spatial block.
-> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ spaceToDepth' op'options block_size input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "SpaceToDepth" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "block_size" .~ block_size & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "block_size" type: "int" description: "The size of the spatial block." has_minimum: true minimum: 2 } -} -- | Applies a sparse gradient to a given accumulator. Does not add if local_step is -- -- less than the accumulator's global_step. sparseAccumulatorApplyGradient :: forall v'2 v'3 v'4 v'5 dtype m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] dtype) => Bool -- ^ __has_known_shape__: Boolean indicating whether gradient_shape is unknown, in which -- case the input is ignored during validation. -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to an accumulator. -> Tensor v'2 Data.Int.Int64 -- ^ __local_step__: The local_step value at which the sparse gradient was computed. -> Tensor v'3 Data.Int.Int64 -- ^ __gradient_indices__: Indices of the sparse gradient to be accumulated. Must be a -- vector. -> Tensor v'4 dtype -- ^ __gradient_values__: Values are the non-zero slices of the gradient, and must have -- the same first dimension as indices, i.e., the nnz represented by indices and -- values must be consistent. -> Tensor v'5 Data.Int.Int64 -- ^ __gradient_shape__: Shape of the sparse gradient to be accumulated. -> m' (ControlNode) sparseAccumulatorApplyGradient = sparseAccumulatorApplyGradient' id sparseAccumulatorApplyGradient' :: forall v'2 v'3 v'4 v'5 dtype m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] dtype) => OpParams -> Bool -- ^ __has_known_shape__: Boolean indicating whether gradient_shape is unknown, in which -- case the input is ignored during validation. -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to an accumulator. -> Tensor v'2 Data.Int.Int64 -- ^ __local_step__: The local_step value at which the sparse gradient was computed. -> Tensor v'3 Data.Int.Int64 -- ^ __gradient_indices__: Indices of the sparse gradient to be accumulated. Must be a -- vector. -> Tensor v'4 dtype -- ^ __gradient_values__: Values are the non-zero slices of the gradient, and must have -- the same first dimension as indices, i.e., the nnz represented by indices and -- values must be consistent. -> Tensor v'5 Data.Int.Int64 -- ^ __gradient_shape__: Shape of the sparse gradient to be accumulated.
-> m' (ControlNode) sparseAccumulatorApplyGradient' op'options has_known_shape handle local_step gradient_indices gradient_values gradient_shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs local_step, buildInputs gradient_indices, buildInputs gradient_values, buildInputs gradient_shape] buildOp [] (opDef "SparseAccumulatorApplyGradient" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "has_known_shape" .~ has_known_shape & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to an accumulator." type: DT_STRING is_ref: true } input_arg { name: "local_step" description: "The local_step value at which the sparse gradient was computed." type: DT_INT64 } input_arg { name: "gradient_indices" description: "Indices of the sparse gradient to be accumulated. Must be a\nvector." type: DT_INT64 } input_arg { name: "gradient_values" description: "Values are the non-zero slices of the gradient, and must have\nthe same first dimension as indices, i.e., the nnz represented by indices and\nvalues must be consistent." type_attr: "dtype" } input_arg { name: "gradient_shape" description: "Shape of the sparse gradient to be accumulated." type: DT_INT64 } attr { name: "dtype" type: "type" description: "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator." allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "has_known_shape" type: "bool" description: "Boolean indicating whether gradient_shape is unknown, in which\ncase the input is ignored during validation." } -} -- | Extracts the average sparse gradient in the given SparseConditionalAccumulator, -- -- provided that sufficient (i.e., more than num_required) gradients have been -- accumulated. The op blocks until sufficient gradients have been -- accumulated. If the accumulator has already aggregated more than num_required -- gradients, it will return the average of the accumulated gradients. -- Also automatically increments the recorded global_step in the accumulator by 1, -- and resets the aggregate to 0. sparseAccumulatorTakeGradient :: forall v'2 dtype m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] dtype) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a SparseConditionalAccumulator. -> Tensor v'2 Data.Int.Int32 -- ^ __num_required__: Number of gradients required before we return an aggregate. -> m' ((Tensor Value Data.Int.Int64, Tensor Value dtype, Tensor Value Data.Int.Int64)) -- ^ (__indices__, __values__, __shape__) -- -- * __indices__: Indices of the average of the accumulated sparse gradients. -- -- * __values__: Values of the average of the accumulated sparse gradients. -- -- * __shape__: Shape of the average of the accumulated sparse gradients. sparseAccumulatorTakeGradient = sparseAccumulatorTakeGradient' id sparseAccumulatorTakeGradient' :: forall v'2 dtype m' .
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] dtype) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a SparseConditionalAccumulator. -> Tensor v'2 Data.Int.Int32 -- ^ __num_required__: Number of gradients required before we return an aggregate. -> m' ((Tensor Value Data.Int.Int64, Tensor Value dtype, Tensor Value Data.Int.Int64)) -- ^ (__indices__, __values__, __shape__) -- -- * __indices__: Indices of the average of the accumulated sparse gradients. -- -- * __values__: Values of the average of the accumulated sparse gradients. -- -- * __shape__: Shape of the average of the accumulated sparse gradients. sparseAccumulatorTakeGradient' op'options handle num_required | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs num_required] buildOp [] (opDef "SparseAccumulatorTakeGradient" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a SparseConditionalAccumulator." type: DT_STRING is_ref: true } input_arg { name: "num_required" description: "Number of gradients required before we return an aggregate." type: DT_INT32 } output_arg { name: "indices" description: "Indices of the average of the accumulated sparse gradients." type: DT_INT64 } output_arg { name: "values" description: "Values of the average of the accumulated sparse gradients." type_attr: "dtype" } output_arg { name: "shape" description: "Shape of the average of the accumulated sparse gradients." type: DT_INT64 } attr { name: "dtype" type: "type" description: "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator." allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } -} -- | Adds two `SparseTensor` objects to produce another `SparseTensor`. -- -- The input `SparseTensor` objects' indices are assumed ordered in standard -- lexicographic order. If this is not the case, run `SparseReorder` first to -- restore index ordering. -- -- By default, if two values sum to zero at some index, the output `SparseTensor` -- would still include that particular location in its index, storing a zero in the -- corresponding value slot. To override this, callers can specify `thresh`, -- indicating that if the sum has a magnitude strictly smaller than `thresh`, its -- corresponding value and index would then not be included. In particular, -- `thresh == 0` (default) means everything is kept and thresholding only takes -- effect for a positive `thresh`. -- -- In the following shapes, `nnz` is the count after taking `thresh` into account. sparseAdd :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t treal . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] treal) => Tensor v'1 Data.Int.Int64 -- ^ __a_indices__: 2-D. The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix. -> Tensor v'2 t -- ^ __a_values__: 1-D.
The `values` of the first `SparseTensor`, size `[nnz]` Vector. -> Tensor v'3 Data.Int.Int64 -- ^ __a_shape__: 1-D. The `shape` of the first `SparseTensor`, size `[ndims]` Vector. -> Tensor v'4 Data.Int.Int64 -- ^ __b_indices__: 2-D. The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix. -> Tensor v'5 t -- ^ __b_values__: 1-D. The `values` of the second `SparseTensor`, size `[nnz]` Vector. -> Tensor v'6 Data.Int.Int64 -- ^ __b_shape__: 1-D. The `shape` of the second `SparseTensor`, size `[ndims]` Vector. -> Tensor v'7 treal -- ^ __thresh__: 0-D. The magnitude threshold that determines if an output value/index -- pair takes space. -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__sum_indices__, __sum_values__, __sum_shape__) -- -- * __sum_indices__ -- -- * __sum_values__ -- -- * __sum_shape__ sparseAdd = sparseAdd' id sparseAdd' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t treal . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] treal) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __a_indices__: 2-D. The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix. -> Tensor v'2 t -- ^ __a_values__: 1-D. The `values` of the first `SparseTensor`, size `[nnz]` Vector. -> Tensor v'3 Data.Int.Int64 -- ^ __a_shape__: 1-D. The `shape` of the first `SparseTensor`, size `[ndims]` Vector. -> Tensor v'4 Data.Int.Int64 -- ^ __b_indices__: 2-D. The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix. -> Tensor v'5 t -- ^ __b_values__: 1-D. The `values` of the second `SparseTensor`, size `[nnz]` Vector. -> Tensor v'6 Data.Int.Int64 -- ^ __b_shape__: 1-D. The `shape` of the second `SparseTensor`, size `[ndims]` Vector. -> Tensor v'7 treal -- ^ __thresh__: 0-D. The magnitude threshold that determines if an output value/index -- pair takes space. -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__sum_indices__, __sum_values__, __sum_shape__) -- -- * __sum_indices__ -- -- * __sum_values__ -- -- * __sum_shape__ sparseAdd' op'options a_indices a_values a_shape b_indices b_values b_shape thresh | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a_indices, buildInputs a_values, buildInputs a_shape, buildInputs b_indices, buildInputs b_values, buildInputs b_shape, buildInputs thresh] return (opDef "SparseAdd" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Treal" .~ tensorType (undefined :: treal) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a_indices" description: "2-D. The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix." type: DT_INT64 } input_arg { name: "a_values" description: "1-D. The `values` of the first `SparseTensor`, size `[nnz]` Vector." type_attr: "T" } input_arg { name: "a_shape" description: "1-D. The `shape` of the first `SparseTensor`, size `[ndims]` Vector." type: DT_INT64 } input_arg { name: "b_indices" description: "2-D. The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix." type: DT_INT64 } input_arg { name: "b_values" description: "1-D. The `values` of the second `SparseTensor`, size `[nnz]` Vector." type_attr: "T" } input_arg { name: "b_shape" description: "1-D. The `shape` of the second `SparseTensor`, size `[ndims]` Vector." 
type: DT_INT64 } input_arg { name: "thresh" description: "0-D. The magnitude threshold that determines if an output value/index\npair takes space." type_attr: "Treal" } output_arg { name: "sum_indices" type: DT_INT64 } output_arg { name: "sum_values" type_attr: "T" } output_arg { name: "sum_shape" type: DT_INT64 } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Treal" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } -} -- | The gradient operator for the SparseAdd op. -- -- The SparseAdd op calculates A + B, where A, B, and the sum are all represented -- as `SparseTensor` objects. This op takes in the upstream gradient w.r.t. -- non-empty values of the sum, and outputs the gradients w.r.t. the non-empty -- values of A and B. sparseAddGrad :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __backprop_val_grad__: 1-D with shape `[nnz(sum)]`. The gradient with respect to -- the non-empty values of the sum. -> Tensor v'2 Data.Int.Int64 -- ^ __a_indices__: 2-D. The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`. -> Tensor v'3 Data.Int.Int64 -- ^ __b_indices__: 2-D. The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`. -> Tensor v'4 Data.Int.Int64 -- ^ __sum_indices__: 2-D. The `indices` of the sum `SparseTensor`, size -- `[nnz(sum), ndims]`. -> (Tensor Build t, Tensor Build t) -- ^ (__a_val_grad__, __b_val_grad__) -- -- * __a_val_grad__: 1-D with shape `[nnz(A)]`. The gradient with respect to the -- non-empty values of A. -- -- * __b_val_grad__: 1-D with shape `[nnz(B)]`. The gradient with respect to the -- non-empty values of B. sparseAddGrad = sparseAddGrad' id sparseAddGrad' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __backprop_val_grad__: 1-D with shape `[nnz(sum)]`. The gradient with respect to -- the non-empty values of the sum. -> Tensor v'2 Data.Int.Int64 -- ^ __a_indices__: 2-D. The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`. -> Tensor v'3 Data.Int.Int64 -- ^ __b_indices__: 2-D. The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`. -> Tensor v'4 Data.Int.Int64 -- ^ __sum_indices__: 2-D. The `indices` of the sum `SparseTensor`, size -- `[nnz(sum), ndims]`. -> (Tensor Build t, Tensor Build t) -- ^ (__a_val_grad__, __b_val_grad__) -- -- * __a_val_grad__: 1-D with shape `[nnz(A)]`. The gradient with respect to the -- non-empty values of A. -- -- * __b_val_grad__: 1-D with shape `[nnz(B)]`. The gradient with respect to the -- non-empty values of B. 
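-- A minimal pure-Haskell sketch of the gather that SparseAddGrad performs for
-- each operand (illustrative only; `valGrad` is a hypothetical helper, not part
-- of the generated bindings). Each value of A (or B) picks up the upstream
-- gradient at its index in the sum, or 0 if `thresh` dropped that index:
--
-- > import qualified Data.Map.Strict as Map
-- >
-- > valGrad :: [Double]  -- upstream gradient, one entry per sum index
-- >         -> [[Int]]   -- sum_indices
-- >         -> [[Int]]   -- a_indices (or b_indices)
-- >         -> [Double]
-- > valGrad upstream sumIx opIx =
-- >   let table = Map.fromList (zip sumIx upstream)
-- >   in map (\i -> Map.findWithDefault 0 i table) opIx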
sparseAddGrad' op'options backprop_val_grad a_indices b_indices sum_indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs backprop_val_grad, buildInputs a_indices, buildInputs b_indices, buildInputs sum_indices] return (opDef "SparseAddGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "backprop_val_grad" description: "1-D with shape `[nnz(sum)]`. The gradient with respect to\nthe non-empty values of the sum." type_attr: "T" } input_arg { name: "a_indices" description: "2-D. The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`." type: DT_INT64 } input_arg { name: "b_indices" description: "2-D. The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`." type: DT_INT64 } input_arg { name: "sum_indices" description: "2-D. The `indices` of the sum `SparseTensor`, size\n`[nnz(sum), ndims]`." type: DT_INT64 } output_arg { name: "a_val_grad" description: "1-D with shape `[nnz(A)]`. The gradient with respect to the\nnon-empty values of A." type_attr: "T" } output_arg { name: "b_val_grad" description: "1-D with shape `[nnz(B)]`. The gradient with respect to the\nnon-empty values of B." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } -} -- | Update relevant entries in '*var' and '*accum' according to the adadelta scheme. sparseApplyAdadelta :: forall v'4 v'5 v'6 v'7 v'8 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __accum__: Should be from a Variable(). -> Tensor Ref t -- ^ __accum_update__: Should be from a Variable(). -> Tensor v'4 t -- ^ __lr__: Learning rate. Must be a scalar. -> Tensor v'5 t -- ^ __rho__: Decay factor. Must be a scalar. -> Tensor v'6 t -- ^ __epsilon__: Constant factor. Must be a scalar. -> Tensor v'7 t -- ^ __grad__: The gradient. -> Tensor v'8 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". sparseApplyAdadelta = sparseApplyAdadelta' id sparseApplyAdadelta' :: forall v'4 v'5 v'6 v'7 v'8 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __accum__: Should be from a Variable(). -> Tensor Ref t -- ^ __accum_update__: Should be from a Variable(). -> Tensor v'4 t -- ^ __lr__: Learning rate. Must be a scalar. -> Tensor v'5 t -- ^ __rho__: Decay factor. Must be a scalar. -> Tensor v'6 t -- ^ __epsilon__: Constant factor. Must be a scalar. -> Tensor v'7 t -- ^ __grad__: The gradient. -> Tensor v'8 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
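-- The op summary above is terse, so here is a pure-Haskell sketch of the
-- per-row update this op applies. It is reconstructed from the standard
-- Adadelta algorithm rather than quoted from this op's definition, so treat it
-- as an assumption; `adadeltaRow` is a hypothetical name, not part of the
-- generated bindings:
--
-- > adadeltaRow :: Double -> Double -> Double          -- lr, rho, epsilon
-- >             -> (Double, Double, Double) -> Double  -- (var, accum, accum_update), grad
-- >             -> (Double, Double, Double)
-- > adadeltaRow lr rho eps (var, accum, accumUpd) g =
-- >   let accum'    = rho * accum + (1 - rho) * g * g
-- >       update    = sqrt (accumUpd + eps) / sqrt (accum' + eps) * g
-- >       accumUpd' = rho * accumUpd + (1 - rho) * update * update
-- >   in (var - lr * update, accum', accumUpd')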
sparseApplyAdadelta' op'options var accum accum_update lr rho epsilon grad indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs accum_update, buildInputs lr, buildInputs rho, buildInputs epsilon, buildInputs grad, buildInputs indices] buildOp [] (opDef "SparseApplyAdadelta" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type_attr: "T" is_ref: true } input_arg { name: "accum" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "accum_update" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "lr" description: "Learning rate. Must be a scalar." type_attr: "T" } input_arg { name: "rho" description: "Decay factor. Must be a scalar." type_attr: "T" } input_arg { name: "epsilon" description: "Constant factor. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention." } -} -- | Update relevant entries in '*var' and '*accum' according to the adagrad scheme. -- -- That is, for rows for which we have grad, we update var and accum as follows: -- accum += grad * grad -- var -= lr * grad * (1 / sqrt(accum)) sparseApplyAdagrad :: forall v'3 v'4 v'5 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 t -- ^ __lr__: Learning rate. Must be a scalar. -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". sparseApplyAdagrad = sparseApplyAdagrad' id sparseApplyAdagrad' :: forall v'3 v'4 v'5 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 t -- ^ __lr__: Learning rate. Must be a scalar. -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum.
-> m' (Tensor Ref t) -- ^ __out__: Same as "var". sparseApplyAdagrad' op'options var accum lr grad indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs lr, buildInputs grad, buildInputs indices] buildOp [] (opDef "SparseApplyAdagrad" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "accum" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "lr" description: "Learning rate. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } -} -- | Update entries in '*var' and '*accum' according to the proximal adagrad scheme. sparseApplyAdagradDA :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __gradient_accumulator__: Should be from a Variable(). -> Tensor Ref t -- ^ __gradient_squared_accumulator__: Should be from a Variable(). -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> Tensor v'6 t -- ^ __lr__: Learning rate. Must be a scalar. -> Tensor v'7 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'8 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'9 Data.Int.Int64 -- ^ __global_step__: Training step number. Must be a scalar. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". sparseApplyAdagradDA = sparseApplyAdagradDA' id sparseApplyAdagradDA' :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __gradient_accumulator__: Should be from a Variable(). -> Tensor Ref t -- ^ __gradient_squared_accumulator__: Should be from a Variable(). -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. 
-> Tensor v'6 t -- ^ __lr__: Learning rate. Must be a scalar. -> Tensor v'7 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'8 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'9 Data.Int.Int64 -- ^ __global_step__: Training step number. Must be a scalar. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". sparseApplyAdagradDA' op'options var gradient_accumulator gradient_squared_accumulator grad indices lr l1 l2 global_step | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs gradient_accumulator, buildInputs gradient_squared_accumulator, buildInputs grad, buildInputs indices, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs global_step] buildOp [] (opDef "SparseApplyAdagradDA" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "gradient_accumulator" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "gradient_squared_accumulator" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } input_arg { name: "lr" description: "Learning rate. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "global_step" description: "Training step number. Must be a scalar." type: DT_INT64 } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention." } -} -- | Update '*var' according to the centered RMSProp algorithm. -- -- The centered RMSProp algorithm uses an estimate of the centered second moment -- (i.e., the variance) for normalization, as opposed to regular RMSProp, which -- uses the (uncentered) second moment. This often helps with training, but is -- slightly more expensive in terms of computation and memory. -- -- Note that in the dense implementation of this algorithm, mg, ms, and mom will -- update even if the grad is zero, but in this sparse implementation, mg, ms, -- and mom will not update in iterations during which the grad is zero.
-- -- mean_square = decay * mean_square + (1-decay) * gradient ** 2 -- mean_grad = decay * mean_grad + (1-decay) * gradient -- Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) -- -- ms <- rho * ms_{t-1} + (1-rho) * grad * grad -- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) -- var <- var - mom sparseApplyCenteredRMSProp :: forall v'5 v'6 v'7 v'8 v'9 v'10 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __mg__: Should be from a Variable(). -> Tensor Ref t -- ^ __ms__: Should be from a Variable(). -> Tensor Ref t -- ^ __mom__: Should be from a Variable(). -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'6 t -- ^ __rho__: Decay rate. Must be a scalar. -> Tensor v'7 t -- ^ __momentum__ -> Tensor v'8 t -- ^ __epsilon__: Ridge term. Must be a scalar. -> Tensor v'9 t -- ^ __grad__: The gradient. -> Tensor v'10 tindices -- ^ __indices__: A vector of indices into the first dimension of var, ms and mom. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". sparseApplyCenteredRMSProp = sparseApplyCenteredRMSProp' id sparseApplyCenteredRMSProp' :: forall v'5 v'6 v'7 v'8 v'9 v'10 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __mg__: Should be from a Variable(). -> Tensor Ref t -- ^ __ms__: Should be from a Variable(). -> Tensor Ref t -- ^ __mom__: Should be from a Variable(). -> Tensor v'5 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'6 t -- ^ __rho__: Decay rate. Must be a scalar. -> Tensor v'7 t -- ^ __momentum__ -> Tensor v'8 t -- ^ __epsilon__: Ridge term. Must be a scalar. -> Tensor v'9 t -- ^ __grad__: The gradient. -> Tensor v'10 tindices -- ^ __indices__: A vector of indices into the first dimension of var, ms and mom. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". sparseApplyCenteredRMSProp' op'options var mg ms mom lr rho momentum epsilon grad indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs mg, buildInputs ms, buildInputs mom, buildInputs lr, buildInputs rho, buildInputs momentum, buildInputs epsilon, buildInputs grad, buildInputs indices] buildOp [] (opDef "SparseApplyCenteredRMSProp" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "mg" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "ms" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "mom" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "rho" description: "Decay rate. Must be a scalar." 
type_attr: "T" } input_arg { name: "momentum" type_attr: "T" } input_arg { name: "epsilon" description: "Ridge term. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var, ms and mom." type_attr: "Tindices" } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } -} -- | Update relevant entries in '*var' according to the Ftrl-proximal scheme. -- -- That is for rows we have grad for, we update var, accum and linear as follows: -- accum_new = accum + grad * grad -- linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var -- quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 -- var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 -- accum = accum_new sparseApplyFtrl :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __accum__: Should be from a Variable(). -> Tensor Ref t -- ^ __linear__: Should be from a Variable(). -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> Tensor v'6 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'7 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'8 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'9 t -- ^ __lr_power__: Scaling factor. Must be a scalar. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". sparseApplyFtrl = sparseApplyFtrl' id sparseApplyFtrl' :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __accum__: Should be from a Variable(). -> Tensor Ref t -- ^ __linear__: Should be from a Variable(). -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> Tensor v'6 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'7 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'8 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'9 t -- ^ __lr_power__: Scaling factor. Must be a scalar. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". 
sparseApplyFtrl' op'options var accum linear grad indices lr l1 l2 lr_power | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs linear, buildInputs grad, buildInputs indices, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs lr_power] buildOp [] (opDef "SparseApplyFtrl" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "accum" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "linear" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "lr_power" description: "Scaling factor. Must be a scalar." type_attr: "T" } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } -} -- | Update relevant entries in '*var' and '*accum' according to the momentum scheme. -- -- Set use_nesterov = True if you want to use Nesterov momentum. -- -- That is, for rows for which we have grad, we update var and accum as follows: -- -- accum = accum * momentum + grad -- var -= lr * accum sparseApplyMomentum :: forall v'3 v'4 v'5 v'6 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 t -- ^ __lr__: Learning rate. Must be a scalar. -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> Tensor v'6 t -- ^ __momentum__: Momentum. Must be a scalar. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". sparseApplyMomentum = sparseApplyMomentum' id sparseApplyMomentum' :: forall v'3 v'4 v'5 v'6 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __var__: Should be from a Variable().
-> Tensor Ref t -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 t -- ^ __lr__: Learning rate. Must be a scalar. -> Tensor v'4 t -- ^ __grad__: The gradient. -> Tensor v'5 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> Tensor v'6 t -- ^ __momentum__: Momentum. Must be a scalar. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". sparseApplyMomentum' op'options var accum lr grad indices momentum | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs lr, buildInputs grad, buildInputs indices, buildInputs momentum] buildOp [] (opDef "SparseApplyMomentum" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "accum" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "lr" description: "Learning rate. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } input_arg { name: "momentum" description: "Momentum. Must be a scalar." type_attr: "T" } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } attr { name: "use_nesterov" type: "bool" default_value { b: false } description: "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum." } -} -- | Sparse update entries in '*var' and '*accum' according to the FOBOS algorithm. -- -- That is, for rows for which we have grad, we update var and accum as follows: -- accum += grad * grad -- prox_v = var -- prox_v -= lr * grad * (1 / sqrt(accum)) -- var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} sparseApplyProximalAdagrad :: forall v'3 v'4 v'5 v'6 v'7 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 t -- ^ __lr__: Learning rate. Must be a scalar. -> Tensor v'4 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'5 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'6 t -- ^ __grad__: The gradient. -> Tensor v'7 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> m' (Tensor Ref t) -- ^ __out__: Same as "var".
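-- A pure-Haskell transcription of the per-row FOBOS/proximal-adagrad update
-- quoted in the Haddock above (an illustrative sketch only; `proxAdagradRow`
-- is a hypothetical name, not part of the generated bindings):
--
-- > proxAdagradRow :: Double -> Double -> Double       -- lr, l1, l2
-- >                -> (Double, Double) -> Double       -- (var, accum), grad
-- >                -> (Double, Double)
-- > proxAdagradRow lr l1 l2 (var, accum) g =
-- >   let accum' = accum + g * g
-- >       proxV  = var - lr * g / sqrt accum'
-- >       var'   = signum proxV / (1 + lr * l2) * max (abs proxV - lr * l1) 0
-- >   in (var', accum')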
sparseApplyProximalAdagrad = sparseApplyProximalAdagrad' id sparseApplyProximalAdagrad' :: forall v'3 v'4 v'5 v'6 v'7 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __accum__: Should be from a Variable(). -> Tensor v'3 t -- ^ __lr__: Learning rate. Must be a scalar. -> Tensor v'4 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'5 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'6 t -- ^ __grad__: The gradient. -> Tensor v'7 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". sparseApplyProximalAdagrad' op'options var accum lr l1 l2 grad indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs grad, buildInputs indices] buildOp [] (opDef "SparseApplyProximalAdagrad" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "accum" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "lr" description: "Learning rate. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention." } -} -- | Sparse update of '*var' using the FOBOS algorithm with a fixed learning rate. -- -- That is, for rows for which we have grad, we update var as follows: -- prox_v = var - alpha * grad -- var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0} sparseApplyProximalGradientDescent :: forall v'2 v'3 v'4 v'5 v'6 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar. -> Tensor v'3 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'4 t -- ^ __l2__: L2 regularization.
Must be a scalar. -> Tensor v'5 t -- ^ __grad__: The gradient. -> Tensor v'6 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". sparseApplyProximalGradientDescent = sparseApplyProximalGradientDescent' id sparseApplyProximalGradientDescent' :: forall v'2 v'3 v'4 v'5 v'6 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor v'2 t -- ^ __alpha__: Scaling factor. Must be a scalar. -> Tensor v'3 t -- ^ __l1__: L1 regularization. Must be a scalar. -> Tensor v'4 t -- ^ __l2__: L2 regularization. Must be a scalar. -> Tensor v'5 t -- ^ __grad__: The gradient. -> Tensor v'6 tindices -- ^ __indices__: A vector of indices into the first dimension of var and accum. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". sparseApplyProximalGradientDescent' op'options var alpha l1 l2 grad indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs alpha, buildInputs l1, buildInputs l2, buildInputs grad, buildInputs indices] buildOp [] (opDef "SparseApplyProximalGradientDescent" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "alpha" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "l1" description: "L1 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "l2" description: "L2 regularization. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var and accum." type_attr: "Tindices" } output_arg { name: "out" description: "Same as \"var\"." type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention." } -} -- | Update '*var' according to the RMSProp algorithm. -- -- Note that in dense implementation of this algorithm, ms and mom will -- update even if the grad is zero, but in this sparse implementation, ms -- and mom will not update in iterations during which the grad is zero. -- -- mean_square = decay * mean_square + (1-decay) * gradient ** 2 -- Delta = learning_rate * gradient / sqrt(mean_square + epsilon) -- -- ms <- rho * ms_{t-1} + (1-rho) * grad * grad -- mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) -- var <- var - mom sparseApplyRMSProp :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices m' . 
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __ms__: Should be from a Variable(). -> Tensor Ref t -- ^ __mom__: Should be from a Variable(). -> Tensor v'4 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'5 t -- ^ __rho__: Decay rate. Must be a scalar. -> Tensor v'6 t -- ^ __momentum__ -> Tensor v'7 t -- ^ __epsilon__: Ridge term. Must be a scalar. -> Tensor v'8 t -- ^ __grad__: The gradient. -> Tensor v'9 tindices -- ^ __indices__: A vector of indices into the first dimension of var, ms and mom. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". sparseApplyRMSProp = sparseApplyRMSProp' id sparseApplyRMSProp' :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __var__: Should be from a Variable(). -> Tensor Ref t -- ^ __ms__: Should be from a Variable(). -> Tensor Ref t -- ^ __mom__: Should be from a Variable(). -> Tensor v'4 t -- ^ __lr__: Scaling factor. Must be a scalar. -> Tensor v'5 t -- ^ __rho__: Decay rate. Must be a scalar. -> Tensor v'6 t -- ^ __momentum__ -> Tensor v'7 t -- ^ __epsilon__: Ridge term. Must be a scalar. -> Tensor v'8 t -- ^ __grad__: The gradient. -> Tensor v'9 tindices -- ^ __indices__: A vector of indices into the first dimension of var, ms and mom. -> m' (Tensor Ref t) -- ^ __out__: Same as "var". sparseApplyRMSProp' op'options var ms mom lr rho momentum epsilon grad indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs ms, buildInputs mom, buildInputs lr, buildInputs rho, buildInputs momentum, buildInputs epsilon, buildInputs grad, buildInputs indices] buildOp [] (opDef "SparseApplyRMSProp" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "ms" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "mom" description: "Should be from a Variable()." type_attr: "T" is_ref: true } input_arg { name: "lr" description: "Scaling factor. Must be a scalar." type_attr: "T" } input_arg { name: "rho" description: "Decay rate. Must be a scalar." type_attr: "T" } input_arg { name: "momentum" type_attr: "T" } input_arg { name: "epsilon" description: "Ridge term. Must be a scalar." type_attr: "T" } input_arg { name: "grad" description: "The gradient." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into the first dimension of var, ms and mom." type_attr: "Tindices" } output_arg { name: "out" description: "Same as \"var\"." 
type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } description: "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention." } -} -- | Concatenates a list of `SparseTensor` along the specified dimension. -- -- Concatenation is with respect to the dense versions of these sparse tensors. -- It is assumed that each input is a `SparseTensor` whose elements are ordered -- along increasing dimension number. -- -- All inputs' shapes must match, except for the concat dimension. The -- `indices`, `values`, and `shapes` lists must have the same length. -- -- The output shape is identical to the inputs', except along the concat -- dimension, where it is the sum of the inputs' sizes along that dimension. -- -- The output elements will be resorted to preserve the sort order along -- increasing dimension number. -- -- This op runs in `O(M log M)` time, where `M` is the total number of non-empty -- values across all inputs. This is due to the need for an internal sort in -- order to concatenate efficiently across an arbitrary dimension. -- -- For example, if `concat_dim = 1` and the inputs are -- -- sp_inputs[0]: shape = [2, 3] -- [0, 2]: "a" -- [1, 0]: "b" -- [1, 1]: "c" -- -- sp_inputs[1]: shape = [2, 4] -- [0, 1]: "d" -- [0, 2]: "e" -- -- then the output will be -- -- shape = [2, 7] -- [0, 2]: "a" -- [0, 4]: "d" -- [0, 5]: "e" -- [1, 0]: "b" -- [1, 1]: "c" -- -- Graphically this is equivalent to doing -- -- [ a] concat [ d e ] = [ a d e ] -- [b c ] [ ] [b c ] sparseConcat :: forall v'1 v'2 v'3 t . (TensorType t) => Data.Int.Int64 -- ^ __concat_dim__: Dimension to concatenate along. Must be in range [-rank, rank), -- where rank is the number of dimensions in each input `SparseTensor`. -> [Tensor v'1 Data.Int.Int64] -- ^ __indices__: 2-D. Indices of each input `SparseTensor`. -> [Tensor v'2 t] -- ^ __values__: 1-D. Non-empty values of each `SparseTensor`. -> [Tensor v'3 Data.Int.Int64] -- ^ __shapes__: 1-D. Shapes of each `SparseTensor`. -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__output_indices__, __output_values__, __output_shape__) -- -- * __output_indices__: 2-D. Indices of the concatenated `SparseTensor`. -- -- * __output_values__: 1-D. Non-empty values of the concatenated `SparseTensor`. -- -- * __output_shape__: 1-D. Shape of the concatenated `SparseTensor`. sparseConcat = sparseConcat' id sparseConcat' :: forall v'1 v'2 v'3 t . (TensorType t) => OpParams -> Data.Int.Int64 -- ^ __concat_dim__: Dimension to concatenate along. Must be in range [-rank, rank), -- where rank is the number of dimensions in each input `SparseTensor`. -> [Tensor v'1 Data.Int.Int64] -- ^ __indices__: 2-D. Indices of each input `SparseTensor`. -> [Tensor v'2 t] -- ^ __values__: 1-D. Non-empty values of each `SparseTensor`. -> [Tensor v'3 Data.Int.Int64] -- ^ __shapes__: 1-D. Shapes of each `SparseTensor`. -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__output_indices__, __output_values__, __output_shape__) -- -- * __output_indices__: 2-D. 
Indices of the concatenated `SparseTensor`. -- -- * __output_values__: 1-D. Non-empty values of the concatenated `SparseTensor`. -- -- * __output_shape__: 1-D. Shape of the concatenated `SparseTensor`. sparseConcat' op'options concat_dim indices values shapes | eqLengthGuard [("N", [("indices", length indices), ("values", length values), ("shapes", length shapes)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices, buildInputs values, buildInputs shapes] return (opDef "SparseConcat" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "concat_dim" .~ concat_dim & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length indices) :: Int64 {- input_arg { name: "indices" description: "2-D. Indices of each input `SparseTensor`." type: DT_INT64 number_attr: "N" } input_arg { name: "values" description: "1-D. Non-empty values of each `SparseTensor`." type_attr: "T" number_attr: "N" } input_arg { name: "shapes" description: "1-D. Shapes of each `SparseTensor`." type: DT_INT64 number_attr: "N" } output_arg { name: "output_indices" description: "2-D. Indices of the concatenated `SparseTensor`." type: DT_INT64 } output_arg { name: "output_values" description: "1-D. Non-empty values of the concatenated `SparseTensor`." type_attr: "T" } output_arg { name: "output_shape" description: "1-D. Shape of the concatenated `SparseTensor`." type: DT_INT64 } attr { name: "concat_dim" type: "int" description: "Dimension to concatenate along. Must be in range [-rank, rank),\nwhere rank is the number of dimensions in each input `SparseTensor`." } attr { name: "N" type: "int" has_minimum: true minimum: 2 } attr { name: "T" type: "type" } -} -- | A conditional accumulator for aggregating sparse gradients. The accumulator -- -- accepts gradients marked with local_step greater or equal to the most recent -- global_step known to the accumulator. The average can be extracted from the -- accumulator, provided sufficient gradients have been accumulated. Extracting the -- average automatically resets the aggregate to 0, and increments the global_step -- recorded by the accumulator. sparseConditionalAccumulator :: forall m' . (MonadBuild m') => DataType -- ^ __dtype__: The type of the value being accumulated. -> Shape -- ^ __shape__: The shape of the values. -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the accumulator. sparseConditionalAccumulator = sparseConditionalAccumulator' id sparseConditionalAccumulator' :: forall m' . (MonadBuild m') => OpParams -> DataType -- ^ __dtype__: The type of the value being accumulated. -> Shape -- ^ __shape__: The shape of the values. -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the accumulator. sparseConditionalAccumulator' op'options dtype shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "SparseConditionalAccumulator" & opAttr "dtype" .~ dtype & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" description: "The handle to the accumulator." type: DT_STRING is_ref: true } attr { name: "dtype" type: "type" description: "The type of the value being accumulated." 
allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "shape" type: "shape" description: "The shape of the values." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this accumulator is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this accumulator will be shared under the given name\nacross multiple sessions." } -} -- | Adds up a SparseTensor and a dense Tensor, using these special rules: -- -- (1) Broadcasts the dense side to have the same shape as the sparse side, if -- eligible; -- (2) Then, only the dense values pointed to by the indices of the SparseTensor -- participate in the cwise addition. -- -- By these rules, the result is a logical SparseTensor with exactly the same -- indices and shape, but possibly with different non-zero values. The output of -- this Op is the resultant non-zero values. sparseDenseCwiseAdd :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 Data.Int.Int64 -- ^ __sp_indices__: 2-D. `N x R` matrix with the indices of non-empty values in a -- SparseTensor, possibly not in canonical ordering. -> Tensor v'2 t -- ^ __sp_values__: 1-D. `N` non-empty values corresponding to `sp_indices`. -> Tensor v'3 Data.Int.Int64 -- ^ __sp_shape__: 1-D. Shape of the input SparseTensor. -> Tensor v'4 t -- ^ __dense__: `R`-D. The dense Tensor operand. -> Tensor Build t -- ^ __output__: 1-D. The `N` values that are operated on. sparseDenseCwiseAdd = sparseDenseCwiseAdd' id sparseDenseCwiseAdd' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __sp_indices__: 2-D. `N x R` matrix with the indices of non-empty values in a -- SparseTensor, possibly not in canonical ordering. -> Tensor v'2 t -- ^ __sp_values__: 1-D. `N` non-empty values corresponding to `sp_indices`. -> Tensor v'3 Data.Int.Int64 -- ^ __sp_shape__: 1-D. Shape of the input SparseTensor. -> Tensor v'4 t -- ^ __dense__: `R`-D. The dense Tensor operand. -> Tensor Build t -- ^ __output__: 1-D. The `N` values that are operated on. sparseDenseCwiseAdd' op'options sp_indices sp_values sp_shape dense | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sp_indices, buildInputs sp_values, buildInputs sp_shape, buildInputs dense] return (opDef "SparseDenseCwiseAdd" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "sp_indices" description: "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering." type: DT_INT64 } input_arg { name: "sp_values" description: "1-D. `N` non-empty values corresponding to `sp_indices`." type_attr: "T" } input_arg { name: "sp_shape" description: "1-D. Shape of the input SparseTensor." type: DT_INT64 } input_arg { name: "dense" description: "`R`-D. The dense Tensor operand." 
type_attr: "T" } output_arg { name: "output" description: "1-D. The `N` values that are operated on." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } -} -- | Component-wise divides a SparseTensor by a dense Tensor. -- -- *Limitation*: this Op only broadcasts the dense side to the sparse side, but not -- the other direction. sparseDenseCwiseDiv :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 Data.Int.Int64 -- ^ __sp_indices__: 2-D. `N x R` matrix with the indices of non-empty values in a -- SparseTensor, possibly not in canonical ordering. -> Tensor v'2 t -- ^ __sp_values__: 1-D. `N` non-empty values corresponding to `sp_indices`. -> Tensor v'3 Data.Int.Int64 -- ^ __sp_shape__: 1-D. Shape of the input SparseTensor. -> Tensor v'4 t -- ^ __dense__: `R`-D. The dense Tensor operand. -> Tensor Build t -- ^ __output__: 1-D. The `N` values that are operated on. sparseDenseCwiseDiv = sparseDenseCwiseDiv' id sparseDenseCwiseDiv' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __sp_indices__: 2-D. `N x R` matrix with the indices of non-empty values in a -- SparseTensor, possibly not in canonical ordering. -> Tensor v'2 t -- ^ __sp_values__: 1-D. `N` non-empty values corresponding to `sp_indices`. -> Tensor v'3 Data.Int.Int64 -- ^ __sp_shape__: 1-D. Shape of the input SparseTensor. -> Tensor v'4 t -- ^ __dense__: `R`-D. The dense Tensor operand. -> Tensor Build t -- ^ __output__: 1-D. The `N` values that are operated on. sparseDenseCwiseDiv' op'options sp_indices sp_values sp_shape dense | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sp_indices, buildInputs sp_values, buildInputs sp_shape, buildInputs dense] return (opDef "SparseDenseCwiseDiv" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "sp_indices" description: "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering." type: DT_INT64 } input_arg { name: "sp_values" description: "1-D. `N` non-empty values corresponding to `sp_indices`." type_attr: "T" } input_arg { name: "sp_shape" description: "1-D. Shape of the input SparseTensor." type: DT_INT64 } input_arg { name: "dense" description: "`R`-D. The dense Tensor operand." type_attr: "T" } output_arg { name: "output" description: "1-D. The `N` values that are operated on." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } -} -- | Component-wise multiplies a SparseTensor by a dense Tensor. 
-- -- The output locations corresponding to the implicitly zero elements in the sparse -- tensor will be zero (i.e., will not take up storage space), regardless of the -- contents of the dense tensor (even if it is +/-INF, and despite the fact that INF*0 == NaN). -- -- *Limitation*: this Op only broadcasts the dense side to the sparse side, but not -- the other direction. sparseDenseCwiseMul :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 Data.Int.Int64 -- ^ __sp_indices__: 2-D. `N x R` matrix with the indices of non-empty values in a -- SparseTensor, possibly not in canonical ordering. -> Tensor v'2 t -- ^ __sp_values__: 1-D. `N` non-empty values corresponding to `sp_indices`. -> Tensor v'3 Data.Int.Int64 -- ^ __sp_shape__: 1-D. Shape of the input SparseTensor. -> Tensor v'4 t -- ^ __dense__: `R`-D. The dense Tensor operand. -> Tensor Build t -- ^ __output__: 1-D. The `N` values that are operated on. sparseDenseCwiseMul = sparseDenseCwiseMul' id sparseDenseCwiseMul' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __sp_indices__: 2-D. `N x R` matrix with the indices of non-empty values in a -- SparseTensor, possibly not in canonical ordering. -> Tensor v'2 t -- ^ __sp_values__: 1-D. `N` non-empty values corresponding to `sp_indices`. -> Tensor v'3 Data.Int.Int64 -- ^ __sp_shape__: 1-D. Shape of the input SparseTensor. -> Tensor v'4 t -- ^ __dense__: `R`-D. The dense Tensor operand. -> Tensor Build t -- ^ __output__: 1-D. The `N` values that are operated on. sparseDenseCwiseMul' op'options sp_indices sp_values sp_shape dense | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sp_indices, buildInputs sp_values, buildInputs sp_shape, buildInputs dense] return (opDef "SparseDenseCwiseMul" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "sp_indices" description: "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering." type: DT_INT64 } input_arg { name: "sp_values" description: "1-D. `N` non-empty values corresponding to `sp_indices`." type_attr: "T" } input_arg { name: "sp_shape" description: "1-D. Shape of the input SparseTensor." type: DT_INT64 } input_arg { name: "dense" description: "`R`-D. The dense Tensor operand." type_attr: "T" } output_arg { name: "output" description: "1-D. The `N` values that are operated on." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } -} -- | Multiply matrix "a" by matrix "b". -- -- The inputs must be two-dimensional matrices and the inner dimension of "a" must -- match the outer dimension of "b". This op is optimized for the case where at -- least one of "a" or "b" is sparse. On one platform, the break-even point for using -- this versus a dense matrix multiply was when the sparse matrix contained 30% zero values. sparseMatMul :: forall v'1 v'2 ta tb .
(OneOf '[Data.Word.Word16, Float] ta, OneOf '[Data.Word.Word16, Float] tb) => Tensor v'1 ta -- ^ __a__ -> Tensor v'2 tb -- ^ __b__ -> Tensor Build Float -- ^ __product__ sparseMatMul = sparseMatMul' id sparseMatMul' :: forall v'1 v'2 ta tb . (OneOf '[Data.Word.Word16, Float] ta, OneOf '[Data.Word.Word16, Float] tb) => OpParams -> Tensor v'1 ta -- ^ __a__ -> Tensor v'2 tb -- ^ __b__ -> Tensor Build Float -- ^ __product__ sparseMatMul' op'options a b | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a, buildInputs b] return (opDef "SparseMatMul" & opAttr "Ta" .~ tensorType (undefined :: ta) & opAttr "Tb" .~ tensorType (undefined :: tb) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a" type_attr: "Ta" } input_arg { name: "b" type_attr: "Tb" } output_arg { name: "product" type: DT_FLOAT } attr { name: "transpose_a" type: "bool" default_value { b: false } } attr { name: "transpose_b" type: "bool" default_value { b: false } } attr { name: "a_is_sparse" type: "bool" default_value { b: false } } attr { name: "b_is_sparse" type: "bool" default_value { b: false } } attr { name: "Ta" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_BFLOAT16 } } } attr { name: "Tb" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_BFLOAT16 } } } -} -- | Computes the sum of elements across dimensions of a SparseTensor. -- -- This Op takes a SparseTensor and is the sparse counterpart to -- `tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor` -- instead of a sparse one. -- -- Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless -- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in -- `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained -- with length 1. -- -- If `reduction_axes` has no entries, all dimensions are reduced, and a tensor -- with a single element is returned. Additionally, the axes can be negative, -- which are interpreted according to the indexing rules in Python. sparseReduceSum :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 Data.Int.Int64 -- ^ __input_indices__: 2-D. `N x R` matrix with the indices of non-empty values in a -- SparseTensor, possibly not in canonical ordering. -> Tensor v'2 t -- ^ __input_values__: 1-D. `N` non-empty values corresponding to `input_indices`. -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__: 1-D. Shape of the input SparseTensor. -> Tensor v'4 Data.Int.Int32 -- ^ __reduction_axes__: 1-D. Length-`K` vector containing the reduction axes. -> Tensor Build t -- ^ __output__: `R-K`-D. The reduced Tensor. sparseReduceSum = sparseReduceSum' id sparseReduceSum' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __input_indices__: 2-D. `N x R` matrix with the indices of non-empty values in a -- SparseTensor, possibly not in canonical ordering. -> Tensor v'2 t -- ^ __input_values__: 1-D. `N` non-empty values corresponding to `input_indices`. -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__: 1-D. Shape of the input SparseTensor. 
-> Tensor v'4 Data.Int.Int32 -- ^ __reduction_axes__: 1-D. Length-`K` vector containing the reduction axes. -> Tensor Build t -- ^ __output__: `R-K`-D. The reduced Tensor. sparseReduceSum' op'options input_indices input_values input_shape reduction_axes | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_indices, buildInputs input_values, buildInputs input_shape, buildInputs reduction_axes] return (opDef "SparseReduceSum" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_indices" description: "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering." type: DT_INT64 } input_arg { name: "input_values" description: "1-D. `N` non-empty values corresponding to `input_indices`." type_attr: "T" } input_arg { name: "input_shape" description: "1-D. Shape of the input SparseTensor." type: DT_INT64 } input_arg { name: "reduction_axes" description: "1-D. Length-`K` vector containing the reduction axes." type: DT_INT32 } output_arg { name: "output" description: "`R-K`-D. The reduced Tensor." type_attr: "T" } attr { name: "keep_dims" type: "bool" default_value { b: false } description: "If true, retain reduced dimensions with length 1." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } -} -- | Computes the sum of elements across dimensions of a SparseTensor. -- -- This Op takes a SparseTensor and is the sparse counterpart to -- `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a -- SparseTensor. -- -- Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless -- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in -- `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained -- with length 1. -- -- If `reduction_axes` has no entries, all dimensions are reduced, and a tensor -- with a single element is returned. Additionally, the axes can be negative, -- which are interpreted according to the indexing rules in Python. sparseReduceSumSparse :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 Data.Int.Int64 -- ^ __input_indices__: 2-D. `N x R` matrix with the indices of non-empty values in a -- SparseTensor, possibly not in canonical ordering. -> Tensor v'2 t -- ^ __input_values__: 1-D. `N` non-empty values corresponding to `input_indices`. -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__: 1-D. Shape of the input SparseTensor. -> Tensor v'4 Data.Int.Int32 -- ^ __reduction_axes__: 1-D. Length-`K` vector containing the reduction axes. -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__output_indices__, __output_values__, __output_shape__) -- -- * __output_indices__ -- -- * __output_values__ -- -- * __output_shape__ sparseReduceSumSparse = sparseReduceSumSparse' id sparseReduceSumSparse' :: forall v'1 v'2 v'3 v'4 t . 
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __input_indices__: 2-D. `N x R` matrix with the indices of non-empty values in a -- SparseTensor, possibly not in canonical ordering. -> Tensor v'2 t -- ^ __input_values__: 1-D. `N` non-empty values corresponding to `input_indices`. -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__: 1-D. Shape of the input SparseTensor. -> Tensor v'4 Data.Int.Int32 -- ^ __reduction_axes__: 1-D. Length-`K` vector containing the reduction axes. -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__output_indices__, __output_values__, __output_shape__) -- -- * __output_indices__ -- -- * __output_values__ -- -- * __output_shape__ sparseReduceSumSparse' op'options input_indices input_values input_shape reduction_axes | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_indices, buildInputs input_values, buildInputs input_shape, buildInputs reduction_axes] return (opDef "SparseReduceSumSparse" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_indices" description: "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering." type: DT_INT64 } input_arg { name: "input_values" description: "1-D. `N` non-empty values corresponding to `input_indices`." type_attr: "T" } input_arg { name: "input_shape" description: "1-D. Shape of the input SparseTensor." type: DT_INT64 } input_arg { name: "reduction_axes" description: "1-D. Length-`K` vector containing the reduction axes." type: DT_INT32 } output_arg { name: "output_indices" type: DT_INT64 } output_arg { name: "output_values" type_attr: "T" } output_arg { name: "output_shape" type: DT_INT64 } attr { name: "keep_dims" type: "bool" default_value { b: false } description: "If true, retain reduced dimensions with length 1." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } -} -- | Reorders a SparseTensor into the canonical, row-major ordering. -- -- Note that by convention, all sparse ops preserve the canonical ordering along -- increasing dimension number. The only time ordering can be violated is during -- manual manipulation of the indices and values vectors to add entries. -- -- Reordering does not affect the shape of the SparseTensor. -- -- If the tensor has rank `R` and `N` non-empty values, `input_indices` has -- shape `[N, R]`, input_values has length `N`, and input_shape has length `R`. sparseReorder :: forall v'1 v'2 v'3 t . (TensorType t) => Tensor v'1 Data.Int.Int64 -- ^ __input_indices__: 2-D. `N x R` matrix with the indices of non-empty values in a -- SparseTensor, possibly not in canonical ordering. -> Tensor v'2 t -- ^ __input_values__: 1-D. `N` non-empty values corresponding to `input_indices`. -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__: 1-D. Shape of the input SparseTensor. -> (Tensor Build Data.Int.Int64, Tensor Build t) -- ^ (__output_indices__, __output_values__) -- -- * __output_indices__: 2-D. `N x R` matrix with the same indices as input_indices, but -- in canonical row-major ordering. 
-- -- * __output_values__: 1-D. `N` non-empty values corresponding to `output_indices`. sparseReorder = sparseReorder' id sparseReorder' :: forall v'1 v'2 v'3 t . (TensorType t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __input_indices__: 2-D. `N x R` matrix with the indices of non-empty values in a -- SparseTensor, possibly not in canonical ordering. -> Tensor v'2 t -- ^ __input_values__: 1-D. `N` non-empty values corresponding to `input_indices`. -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__: 1-D. Shape of the input SparseTensor. -> (Tensor Build Data.Int.Int64, Tensor Build t) -- ^ (__output_indices__, __output_values__) -- -- * __output_indices__: 2-D. `N x R` matrix with the same indices as input_indices, but -- in canonical row-major ordering. -- -- * __output_values__: 1-D. `N` non-empty values corresponding to `output_indices`. sparseReorder' op'options input_indices input_values input_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_indices, buildInputs input_values, buildInputs input_shape] return (opDef "SparseReorder" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_indices" description: "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering." type: DT_INT64 } input_arg { name: "input_values" description: "1-D. `N` non-empty values corresponding to `input_indices`." type_attr: "T" } input_arg { name: "input_shape" description: "1-D. Shape of the input SparseTensor." type: DT_INT64 } output_arg { name: "output_indices" description: "2-D. `N x R` matrix with the same indices as input_indices, but\nin canonical row-major ordering." type: DT_INT64 } output_arg { name: "output_values" description: "1-D. `N` non-empty values corresponding to `output_indices`." type_attr: "T" } attr { name: "T" type: "type" } -} -- | Reshapes a SparseTensor to represent values in a new dense shape. -- -- This operation has the same semantics as reshape on the represented dense -- tensor. The `input_indices` are recomputed based on the requested `new_shape`. -- -- If one component of `new_shape` is the special value -1, the size of that -- dimension is computed so that the total dense size remains constant. At -- most one component of `new_shape` can be -1. The number of dense elements -- implied by `new_shape` must be the same as the number of dense elements -- originally implied by `input_shape`. -- -- Reshaping does not affect the order of values in the SparseTensor. -- -- If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape` -- has length `R_out`, then `input_indices` has shape `[N, R_in]`, -- `input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and -- `output_shape` has length `R_out`. sparseReshape :: Tensor v'1 Data.Int.Int64 -- ^ __input_indices__: 2-D. `N x R_in` matrix with the indices of non-empty values in a -- SparseTensor. -> Tensor v'2 Data.Int.Int64 -- ^ __input_shape__: 1-D. `R_in` vector with the input SparseTensor's dense shape. -> Tensor v'3 Data.Int.Int64 -- ^ __new_shape__: 1-D. `R_out` vector with the requested new dense shape. -> (Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int64) -- ^ (__output_indices__, __output_shape__) -- -- * __output_indices__: 2-D. `N x R_out` matrix with the updated indices of non-empty -- values in the output SparseTensor. -- -- * __output_shape__: 1-D. 
`R_out` vector with the full dense shape of the output -- SparseTensor. This is the same as `new_shape` but with any -1 dimensions -- filled in. sparseReshape = sparseReshape' id sparseReshape' :: OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __input_indices__: 2-D. `N x R_in` matrix with the indices of non-empty values in a -- SparseTensor. -> Tensor v'2 Data.Int.Int64 -- ^ __input_shape__: 1-D. `R_in` vector with the input SparseTensor's dense shape. -> Tensor v'3 Data.Int.Int64 -- ^ __new_shape__: 1-D. `R_out` vector with the requested new dense shape. -> (Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int64) -- ^ (__output_indices__, __output_shape__) -- -- * __output_indices__: 2-D. `N x R_out` matrix with the updated indices of non-empty -- values in the output SparseTensor. -- -- * __output_shape__: 1-D. `R_out` vector with the full dense shape of the output -- SparseTensor. This is the same as `new_shape` but with any -1 dimensions -- filled in. sparseReshape' op'options input_indices input_shape new_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_indices, buildInputs input_shape, buildInputs new_shape] return (opDef "SparseReshape" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_indices" description: "2-D. `N x R_in` matrix with the indices of non-empty values in a\nSparseTensor." type: DT_INT64 } input_arg { name: "input_shape" description: "1-D. `R_in` vector with the input SparseTensor\'s dense shape." type: DT_INT64 } input_arg { name: "new_shape" description: "1-D. `R_out` vector with the requested new dense shape." type: DT_INT64 } output_arg { name: "output_indices" description: "2-D. `N x R_out` matrix with the updated indices of non-empty\nvalues in the output SparseTensor." type: DT_INT64 } output_arg { name: "output_shape" description: "1-D. `R_out` vector with the full dense shape of the output\nSparseTensor. This is the same as `new_shape` but with any -1 dimensions\nfilled in." type: DT_INT64 } -} -- | Computes the mean along sparse segments of a tensor. -- -- Read @{$math_ops#segmentation$the section on segmentation} for an explanation of -- segments. -- -- Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first -- dimension, selecting a subset of dimension 0, specified by `indices`. sparseSegmentMean :: forall v'1 v'2 v'3 t tidx . (OneOf '[Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tidx -- ^ __indices__: A 1-D tensor. Has same rank as `segment_ids`. -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__: A 1-D tensor. Values should be sorted and can be repeated. -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which -- has size `k`, the number of segments. sparseSegmentMean = sparseSegmentMean' id sparseSegmentMean' :: forall v'1 v'2 v'3 t tidx . (OneOf '[Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tidx -- ^ __indices__: A 1-D tensor. Has same rank as `segment_ids`. -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__: A 1-D tensor. Values should be sorted and can be repeated. -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which -- has size `k`, the number of segments. 
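-- A hedged sketch, not part of the generated bindings: gathering rows of a
-- data tensor by `indices` and averaging them into segments given by
-- `segment_ids`. With indices = [0, 1] and segment_ids = [0, 0], rows 0 and
-- 1 are averaged into a single output row. All names here are hypothetical;
-- the input tensors are assumed to be built elsewhere, and the result is a
-- pure `Tensor Build` graph node that can be rendered or run later.
exampleSegmentMean :: Tensor v'1 Float            -- data: rank >= 1
                   -> Tensor v'2 Data.Int.Int32   -- indices: 1-D, pairs with segment_ids
                   -> Tensor v'3 Data.Int.Int32   -- segment_ids: 1-D, sorted, may repeat
                   -> Tensor Build Float          -- per-segment means along dimension 0
exampleSegmentMean = sparseSegmentMean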
sparseSegmentMean' op'options data' indices segment_ids | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs indices, buildInputs segment_ids] return (opDef "SparseSegmentMean" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "indices" description: "A 1-D tensor. Has same rank as `segment_ids`." type_attr: "Tidx" } input_arg { name: "segment_ids" description: "A 1-D tensor. Values should be sorted and can be repeated." type: DT_INT32 } output_arg { name: "output" description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Computes gradients for SparseSegmentMean. -- -- Returns tensor "output" with same shape as grad, except for dimension 0 whose -- value is output_dim0. sparseSegmentMeanGrad :: forall v'1 v'2 v'3 v'4 t tidx . (OneOf '[Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __grad__: gradient propagated to the SparseSegmentMean op. -> Tensor v'2 tidx -- ^ __indices__: indices passed to the corresponding SparseSegmentMean op. -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__: segment_ids passed to the corresponding SparseSegmentMean op. -> Tensor v'4 Data.Int.Int32 -- ^ __output_dim0__: dimension 0 of "data" passed to SparseSegmentMean op. -> Tensor Build t -- ^ __output__ sparseSegmentMeanGrad = sparseSegmentMeanGrad' id sparseSegmentMeanGrad' :: forall v'1 v'2 v'3 v'4 t tidx . (OneOf '[Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __grad__: gradient propagated to the SparseSegmentMean op. -> Tensor v'2 tidx -- ^ __indices__: indices passed to the corresponding SparseSegmentMean op. -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__: segment_ids passed to the corresponding SparseSegmentMean op. -> Tensor v'4 Data.Int.Int32 -- ^ __output_dim0__: dimension 0 of "data" passed to SparseSegmentMean op. -> Tensor Build t -- ^ __output__ sparseSegmentMeanGrad' op'options grad indices segment_ids output_dim0 | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs grad, buildInputs indices, buildInputs segment_ids, buildInputs output_dim0] return (opDef "SparseSegmentMeanGrad" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "grad" description: "gradient propagated to the SparseSegmentMean op." type_attr: "T" } input_arg { name: "indices" description: "indices passed to the corresponding SparseSegmentMean op." type_attr: "Tidx" } input_arg { name: "segment_ids" description: "segment_ids passed to the corresponding SparseSegmentMean op." type: DT_INT32 } input_arg { name: "output_dim0" description: "dimension 0 of \"data\" passed to SparseSegmentMean op." 
type: DT_INT32 } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Computes the sum along sparse segments of a tensor divided by the sqrt of N. -- -- N is the size of the segment being reduced. -- -- Read @{$math_ops#segmentation$the section on segmentation} for an explanation of -- segments. sparseSegmentSqrtN :: forall v'1 v'2 v'3 t tidx . (OneOf '[Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tidx -- ^ __indices__: A 1-D tensor. Has same rank as `segment_ids`. -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__: A 1-D tensor. Values should be sorted and can be repeated. -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which -- has size `k`, the number of segments. sparseSegmentSqrtN = sparseSegmentSqrtN' id sparseSegmentSqrtN' :: forall v'1 v'2 v'3 t tidx . (OneOf '[Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tidx -- ^ __indices__: A 1-D tensor. Has same rank as `segment_ids`. -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__: A 1-D tensor. Values should be sorted and can be repeated. -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which -- has size `k`, the number of segments. sparseSegmentSqrtN' op'options data' indices segment_ids | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs indices, buildInputs segment_ids] return (opDef "SparseSegmentSqrtN" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "indices" description: "A 1-D tensor. Has same rank as `segment_ids`." type_attr: "Tidx" } input_arg { name: "segment_ids" description: "A 1-D tensor. Values should be sorted and can be repeated." type: DT_INT32 } output_arg { name: "output" description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Computes gradients for SparseSegmentSqrtN. -- -- Returns tensor "output" with same shape as grad, except for dimension 0 whose -- value is output_dim0. sparseSegmentSqrtNGrad :: forall v'1 v'2 v'3 v'4 t tidx . (OneOf '[Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __grad__: gradient propagated to the SparseSegmentSqrtN op. -> Tensor v'2 tidx -- ^ __indices__: indices passed to the corresponding SparseSegmentSqrtN op. -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__: segment_ids passed to the corresponding SparseSegmentSqrtN op. -> Tensor v'4 Data.Int.Int32 -- ^ __output_dim0__: dimension 0 of "data" passed to SparseSegmentSqrtN op. -> Tensor Build t -- ^ __output__ sparseSegmentSqrtNGrad = sparseSegmentSqrtNGrad' id sparseSegmentSqrtNGrad' :: forall v'1 v'2 v'3 v'4 t tidx . (OneOf '[Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __grad__: gradient propagated to the SparseSegmentSqrtN op. 
-> Tensor v'2 tidx -- ^ __indices__: indices passed to the corresponding SparseSegmentSqrtN op. -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__: segment_ids passed to the corresponding SparseSegmentSqrtN op. -> Tensor v'4 Data.Int.Int32 -- ^ __output_dim0__: dimension 0 of "data" passed to SparseSegmentSqrtN op. -> Tensor Build t -- ^ __output__ sparseSegmentSqrtNGrad' op'options grad indices segment_ids output_dim0 | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs grad, buildInputs indices, buildInputs segment_ids, buildInputs output_dim0] return (opDef "SparseSegmentSqrtNGrad" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "grad" description: "gradient propagated to the SparseSegmentSqrtN op." type_attr: "T" } input_arg { name: "indices" description: "indices passed to the corresponding SparseSegmentSqrtN op." type_attr: "Tidx" } input_arg { name: "segment_ids" description: "segment_ids passed to the corresponding SparseSegmentSqrtN op." type: DT_INT32 } input_arg { name: "output_dim0" description: "dimension 0 of \"data\" passed to SparseSegmentSqrtN op." type: DT_INT32 } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Computes the sum along sparse segments of a tensor. -- -- Read @{$math_ops#segmentation$the section on segmentation} for an explanation of -- segments. -- -- Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first -- dimension, selecting a subset of dimension 0, specified by `indices`. -- -- For example: -- -- ```prettyprint -- c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) -- -- # Select two rows, one segment. -- tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0])) -- ==> [[0 0 0 0]] -- -- # Select two rows, two segments. -- tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1])) -- ==> [[ 1 2 3 4] -- [-1 -2 -3 -4]] -- -- # Select all rows, two segments. -- tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) -- ==> [[0 0 0 0] -- [5 6 7 8]] -- -- # Which is equivalent to: -- tf.segment_sum(c, tf.constant([0, 0, 1])) -- ``` sparseSegmentSum :: forall v'1 v'2 v'3 t tidx . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tidx -- ^ __indices__: A 1-D tensor. Has same rank as `segment_ids`. -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__: A 1-D tensor. Values should be sorted and can be repeated. -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which -- has size `k`, the number of segments. sparseSegmentSum = sparseSegmentSum' id sparseSegmentSum' :: forall v'1 v'2 v'3 t tidx . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tidx -- ^ __indices__: A 1-D tensor. Has same rank as `segment_ids`. -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__: A 1-D tensor. Values should be sorted and can be repeated.
-> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which -- has size `k`, the number of segments. sparseSegmentSum' op'options data' indices segment_ids | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs indices, buildInputs segment_ids] return (opDef "SparseSegmentSum" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "indices" description: "A 1-D tensor. Has same rank as `segment_ids`." type_attr: "Tidx" } input_arg { name: "segment_ids" description: "A 1-D tensor. Values should be sorted and can be repeated." type: DT_INT32 } output_arg { name: "output" description: "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Applies softmax to a batched N-D `SparseTensor`. -- -- The inputs represent an N-D SparseTensor with logical shape `[..., B, C]` -- (where `N >= 2`), and with indices sorted in the canonical lexicographic order. -- -- This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost -- logical submatrix with shape `[B, C]`, but with the catch that *the implicitly -- zero elements do not participate*. Specifically, the algorithm is equivalent -- to the following: -- -- (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix -- with shape `[B, C]`, along the size-C dimension; -- (2) Masks out the original implicitly-zero locations; -- (3) Renormalizes the remaining elements. -- -- Hence, the `SparseTensor` result has exactly the same non-zero indices and -- shape. sparseSoftmax :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => Tensor v'1 Data.Int.Int64 -- ^ __sp_indices__: 2-D. `NNZ x R` matrix with the indices of non-empty values in a -- SparseTensor, in canonical ordering. -> Tensor v'2 t -- ^ __sp_values__: 1-D. `NNZ` non-empty values corresponding to `sp_indices`. -> Tensor v'3 Data.Int.Int64 -- ^ __sp_shape__: 1-D. Shape of the input SparseTensor. -> Tensor Build t -- ^ __output__: 1-D. The `NNZ` values for the result `SparseTensor`. sparseSoftmax = sparseSoftmax' id sparseSoftmax' :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __sp_indices__: 2-D. `NNZ x R` matrix with the indices of non-empty values in a -- SparseTensor, in canonical ordering. -> Tensor v'2 t -- ^ __sp_values__: 1-D. `NNZ` non-empty values corresponding to `sp_indices`. -> Tensor v'3 Data.Int.Int64 -- ^ __sp_shape__: 1-D. Shape of the input SparseTensor. -> Tensor Build t -- ^ __output__: 1-D. The `NNZ` values for the result `SparseTensor`. sparseSoftmax' op'options sp_indices sp_values sp_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sp_indices, buildInputs sp_values, buildInputs sp_shape] return (opDef "SparseSoftmax" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "sp_indices" description: "2-D. 
`NNZ x R` matrix with the indices of non-empty values in a\nSparseTensor, in canonical ordering." type: DT_INT64 } input_arg { name: "sp_values" description: "1-D. `NNZ` non-empty values corresponding to `sp_indices`." type_attr: "T" } input_arg { name: "sp_shape" description: "1-D. Shape of the input SparseTensor." type: DT_INT64 } output_arg { name: "output" description: "1-D. The `NNZ` values for the result `SparseTensor`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Computes softmax cross entropy cost and gradients to backpropagate. -- -- Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept -- a matrix of label probabilities, but rather a single label per row -- of features. This label is considered to have probability 1.0 for the -- given row. -- -- Inputs are the logits, not probabilities. sparseSoftmaxCrossEntropyWithLogits :: forall v'1 v'2 t tlabels . (OneOf '[Data.Word.Word16, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tlabels) => Tensor v'1 t -- ^ __features__: batch_size x num_classes matrix -> Tensor v'2 tlabels -- ^ __labels__: batch_size vector with values in [0, num_classes). -- This is the label for the given minibatch entry. -> (Tensor Build t, Tensor Build t) -- ^ (__loss__, __backprop__) -- -- * __loss__: Per example loss (batch_size vector). -- -- * __backprop__: backpropagated gradients (batch_size x num_classes matrix). sparseSoftmaxCrossEntropyWithLogits = sparseSoftmaxCrossEntropyWithLogits' id sparseSoftmaxCrossEntropyWithLogits' :: forall v'1 v'2 t tlabels . (OneOf '[Data.Word.Word16, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tlabels) => OpParams -> Tensor v'1 t -- ^ __features__: batch_size x num_classes matrix -> Tensor v'2 tlabels -- ^ __labels__: batch_size vector with values in [0, num_classes). -- This is the label for the given minibatch entry. -> (Tensor Build t, Tensor Build t) -- ^ (__loss__, __backprop__) -- -- * __loss__: Per example loss (batch_size vector). -- -- * __backprop__: backpropagated gradients (batch_size x num_classes matrix). sparseSoftmaxCrossEntropyWithLogits' op'options features labels | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features, buildInputs labels] return (opDef "SparseSoftmaxCrossEntropyWithLogits" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tlabels" .~ tensorType (undefined :: tlabels) & op'options & opInputs .~ op'inputs) {- input_arg { name: "features" description: "batch_size x num_classes matrix" type_attr: "T" } input_arg { name: "labels" description: "batch_size vector with values in [0, num_classes).\nThis is the label for the given minibatch entry." type_attr: "Tlabels" } output_arg { name: "loss" description: "Per example loss (batch_size vector)." type_attr: "T" } output_arg { name: "backprop" description: "backpropagated gradients (batch_size x num_classes matrix)." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "Tlabels" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Returns the element-wise max of two SparseTensors. -- -- Assumes the two SparseTensors have the same shape, i.e., no broadcasting. sparseSparseMaximum :: forall v'1 v'2 v'3 v'4 v'5 v'6 t . 
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 Data.Int.Int64 -- ^ __a_indices__: 2-D. `N x R` matrix with the indices of non-empty values in a -- SparseTensor, in the canonical lexicographic ordering. -> Tensor v'2 t -- ^ __a_values__: 1-D. `N` non-empty values corresponding to `a_indices`. -> Tensor v'3 Data.Int.Int64 -- ^ __a_shape__: 1-D. Shape of the input SparseTensor. -> Tensor v'4 Data.Int.Int64 -- ^ __b_indices__: counterpart to `a_indices` for the other operand. -> Tensor v'5 t -- ^ __b_values__: counterpart to `a_values` for the other operand; must be of the same dtype. -> Tensor v'6 Data.Int.Int64 -- ^ __b_shape__: counterpart to `a_shape` for the other operand; the two shapes must be equal. -> (Tensor Build Data.Int.Int64, Tensor Build t) -- ^ (__output_indices__, __output_values__) -- -- * __output_indices__: 2-D. The indices of the output SparseTensor. -- -- * __output_values__: 1-D. The values of the output SparseTensor. sparseSparseMaximum = sparseSparseMaximum' id sparseSparseMaximum' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __a_indices__: 2-D. `N x R` matrix with the indices of non-empty values in a -- SparseTensor, in the canonical lexicographic ordering. -> Tensor v'2 t -- ^ __a_values__: 1-D. `N` non-empty values corresponding to `a_indices`. -> Tensor v'3 Data.Int.Int64 -- ^ __a_shape__: 1-D. Shape of the input SparseTensor. -> Tensor v'4 Data.Int.Int64 -- ^ __b_indices__: counterpart to `a_indices` for the other operand. -> Tensor v'5 t -- ^ __b_values__: counterpart to `a_values` for the other operand; must be of the same dtype. -> Tensor v'6 Data.Int.Int64 -- ^ __b_shape__: counterpart to `a_shape` for the other operand; the two shapes must be equal. -> (Tensor Build Data.Int.Int64, Tensor Build t) -- ^ (__output_indices__, __output_values__) -- -- * __output_indices__: 2-D. The indices of the output SparseTensor. -- -- * __output_values__: 1-D. The values of the output SparseTensor. sparseSparseMaximum' op'options a_indices a_values a_shape b_indices b_values b_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a_indices, buildInputs a_values, buildInputs a_shape, buildInputs b_indices, buildInputs b_values, buildInputs b_shape] return (opDef "SparseSparseMaximum" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a_indices" description: "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, in the canonical lexicographic ordering." type: DT_INT64 } input_arg { name: "a_values" description: "1-D. `N` non-empty values corresponding to `a_indices`." type_attr: "T" } input_arg { name: "a_shape" description: "1-D. Shape of the input SparseTensor." type: DT_INT64 } input_arg { name: "b_indices" description: "counterpart to `a_indices` for the other operand." type: DT_INT64 } input_arg { name: "b_values" description: "counterpart to `a_values` for the other operand; must be of the same dtype." type_attr: "T" } input_arg { name: "b_shape" description: "counterpart to `a_shape` for the other operand; the two shapes must be equal." type: DT_INT64 } output_arg { name: "output_indices" description: "2-D. The indices of the output SparseTensor." 
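--
-- A hedged usage sketch for `sparseSparseMaximum` (a `sparseSparseMinimum`
-- call is analogous); the helper modules and the demo name are assumptions,
-- as in the other example comments in this file:
--
-- > import Data.Int (Int64)
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.GenOps.Core as Ops
-- > import qualified TensorFlow.Ops as TF
-- >
-- > -- Two 1x3 SparseTensors: a has 7 at (0,0); b has 5 at (0,1). The result
-- > -- takes the union of the index sets, comparing against the implicit
-- > -- zeros: max(7,0) = 7 and max(0,5) = 5.
-- > maxDemo :: IO (V.Vector Int64, V.Vector Float)
-- > maxDemo = TF.runSession $ do
-- >     let sh  = TF.vector [1, 3 :: Int64]
-- >         aIx = TF.constant (TF.Shape [1, 2]) [0, 0 :: Int64]
-- >         bIx = TF.constant (TF.Shape [1, 2]) [0, 1 :: Int64]
-- >     TF.run (Ops.sparseSparseMaximum aIx (TF.vector [7 :: Float]) sh
-- >                                     bIx (TF.vector [5 :: Float]) sh)
--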
type: DT_INT64 } output_arg { name: "output_values" description: "1-D. The values of the output SparseTensor." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } -} -- | Returns the element-wise min of two SparseTensors. -- -- Assumes the two SparseTensors have the same shape, i.e., no broadcasting. sparseSparseMinimum :: forall v'1 v'2 v'3 v'4 v'5 v'6 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 Data.Int.Int64 -- ^ __a_indices__: 2-D. `N x R` matrix with the indices of non-empty values in a -- SparseTensor, in the canonical lexicographic ordering. -> Tensor v'2 t -- ^ __a_values__: 1-D. `N` non-empty values corresponding to `a_indices`. -> Tensor v'3 Data.Int.Int64 -- ^ __a_shape__: 1-D. Shape of the input SparseTensor. -> Tensor v'4 Data.Int.Int64 -- ^ __b_indices__: counterpart to `a_indices` for the other operand. -> Tensor v'5 t -- ^ __b_values__: counterpart to `a_values` for the other operand; must be of the same dtype. -> Tensor v'6 Data.Int.Int64 -- ^ __b_shape__: counterpart to `a_shape` for the other operand; the two shapes must be equal. -> (Tensor Build Data.Int.Int64, Tensor Build t) -- ^ (__output_indices__, __output_values__) -- -- * __output_indices__: 2-D. The indices of the output SparseTensor. -- -- * __output_values__: 1-D. The values of the output SparseTensor. sparseSparseMinimum = sparseSparseMinimum' id sparseSparseMinimum' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __a_indices__: 2-D. `N x R` matrix with the indices of non-empty values in a -- SparseTensor, in the canonical lexicographic ordering. -> Tensor v'2 t -- ^ __a_values__: 1-D. `N` non-empty values corresponding to `a_indices`. -> Tensor v'3 Data.Int.Int64 -- ^ __a_shape__: 1-D. Shape of the input SparseTensor. -> Tensor v'4 Data.Int.Int64 -- ^ __b_indices__: counterpart to `a_indices` for the other operand. -> Tensor v'5 t -- ^ __b_values__: counterpart to `a_values` for the other operand; must be of the same dtype. -> Tensor v'6 Data.Int.Int64 -- ^ __b_shape__: counterpart to `a_shape` for the other operand; the two shapes must be equal. -> (Tensor Build Data.Int.Int64, Tensor Build t) -- ^ (__output_indices__, __output_values__) -- -- * __output_indices__: 2-D. The indices of the output SparseTensor. -- -- * __output_values__: 1-D. The values of the output SparseTensor. sparseSparseMinimum' op'options a_indices a_values a_shape b_indices b_values b_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a_indices, buildInputs a_values, buildInputs a_shape, buildInputs b_indices, buildInputs b_values, buildInputs b_shape] return (opDef "SparseSparseMinimum" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a_indices" description: "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, in the canonical lexicographic ordering." type: DT_INT64 } input_arg { name: "a_values" description: "1-D. `N` non-empty values corresponding to `a_indices`." 
type_attr: "T" } input_arg { name: "a_shape" description: "1-D. Shape of the input SparseTensor." type: DT_INT64 } input_arg { name: "b_indices" description: "counterpart to `a_indices` for the other operand." type: DT_INT64 } input_arg { name: "b_values" description: "counterpart to `a_values` for the other operand; must be of the same dtype." type_attr: "T" } input_arg { name: "b_shape" description: "counterpart to `a_shape` for the other operand; the two shapes must be equal." type: DT_INT64 } output_arg { name: "output_indices" description: "2-D. The indices of the output SparseTensor." type: DT_INT64 } output_arg { name: "output_values" description: "1-D. The values of the output SparseTensor." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } -} -- | Split a `SparseTensor` into `num_split` tensors along one dimension. -- -- If the `shape[split_dim]` is not an integer multiple of `num_split`. Slices -- `[0 : shape[split_dim] % num_split]` gets one extra dimension. -- For example, if `split_dim = 1` and `num_split = 2` and the input is -- -- input_tensor = shape = [2, 7] -- [ a d e ] -- [b c ] -- -- Graphically the output tensors are: -- -- output_tensor[0] = shape = [2, 4] -- [ a ] -- [b c ] -- -- output_tensor[1] = shape = [2, 3] -- [ d e ] -- [ ] sparseSplit :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => Data.Int.Int64 -- ^ __num_split__: The number of ways to split. -> Tensor v'1 Data.Int.Int64 -- ^ __split_dim__: 0-D. The dimension along which to split. Must be in the range -- `[0, rank(shape))`. -> Tensor v'2 Data.Int.Int64 -- ^ __indices__: 2-D tensor represents the indices of the sparse tensor. -> Tensor v'3 t -- ^ __values__: 1-D tensor represents the values of the sparse tensor. -> Tensor v'4 Data.Int.Int64 -- ^ __shape__: 1-D. tensor represents the shape of the sparse tensor. -- output indices: A list of 1-D tensors represents the indices of the output -- sparse tensors. -> ([Tensor Build Data.Int.Int64], [Tensor Build t], [Tensor Build Data.Int.Int64]) -- ^ (__output_indices__, __output_values__, __output_shape__) -- -- * __output_indices__ -- -- * __output_values__: A list of 1-D tensors represents the values of the output sparse -- tensors. -- -- * __output_shape__: A list of 1-D tensors represents the shape of the output sparse -- tensors. sparseSplit = sparseSplit' id sparseSplit' :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => OpParams -> Data.Int.Int64 -- ^ __num_split__: The number of ways to split. -> Tensor v'1 Data.Int.Int64 -- ^ __split_dim__: 0-D. The dimension along which to split. Must be in the range -- `[0, rank(shape))`. -> Tensor v'2 Data.Int.Int64 -- ^ __indices__: 2-D tensor represents the indices of the sparse tensor. -> Tensor v'3 t -- ^ __values__: 1-D tensor represents the values of the sparse tensor. -> Tensor v'4 Data.Int.Int64 -- ^ __shape__: 1-D. tensor represents the shape of the sparse tensor. -- output indices: A list of 1-D tensors represents the indices of the output -- sparse tensors. -> ([Tensor Build Data.Int.Int64], [Tensor Build t], [Tensor Build Data.Int.Int64]) -- ^ (__output_indices__, __output_values__, __output_shape__) -- -- * __output_indices__ -- -- * __output_values__: A list of 1-D tensors represents the values of the output sparse -- tensors. 
-- -- * __output_shape__: A list of 1-D tensors represents the shape of the output sparse -- tensors. sparseSplit' op'options num_split split_dim indices values shape | eqLengthGuard [] = pureOp [num_split, num_split, num_split] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs split_dim, buildInputs indices, buildInputs values, buildInputs shape] return (opDef "SparseSplit" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "num_split" .~ num_split & op'options & opInputs .~ op'inputs) {- input_arg { name: "split_dim" description: "0-D. The dimension along which to split. Must be in the range\n`[0, rank(shape))`." type: DT_INT64 } input_arg { name: "indices" description: "2-D tensor represents the indices of the sparse tensor." type: DT_INT64 } input_arg { name: "values" description: "1-D tensor represents the values of the sparse tensor." type_attr: "T" } input_arg { name: "shape" description: "1-D. tensor represents the shape of the sparse tensor.\noutput indices: A list of 1-D tensors represents the indices of the output\nsparse tensors." type: DT_INT64 } output_arg { name: "output_indices" type: DT_INT64 number_attr: "num_split" } output_arg { name: "output_values" description: "A list of 1-D tensors represents the values of the output sparse\ntensors." type_attr: "T" number_attr: "num_split" } output_arg { name: "output_shape" description: "A list of 1-D tensors represents the shape of the output sparse\ntensors." type: DT_INT64 number_attr: "num_split" } attr { name: "num_split" type: "int" description: "The number of ways to split." has_minimum: true minimum: 1 } attr { name: "T" type: "type" } -} -- | Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`. -- -- This Op does not require `a_indices` be sorted in standard lexicographic order. sparseTensorDenseAdd :: forall v'1 v'2 v'3 v'4 t tindices . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 tindices -- ^ __a_indices__: 2-D. The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`. -> Tensor v'2 t -- ^ __a_values__: 1-D. The `values` of the `SparseTensor`, with shape `[nnz]`. -> Tensor v'3 tindices -- ^ __a_shape__: 1-D. The `shape` of the `SparseTensor`, with shape `[ndims]`. -> Tensor v'4 t -- ^ __b__: `ndims`-D Tensor. With shape `a_shape`. -> Tensor Build t -- ^ __output__ sparseTensorDenseAdd = sparseTensorDenseAdd' id sparseTensorDenseAdd' :: forall v'1 v'2 v'3 v'4 t tindices . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 tindices -- ^ __a_indices__: 2-D. The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`. -> Tensor v'2 t -- ^ __a_values__: 1-D. The `values` of the `SparseTensor`, with shape `[nnz]`. -> Tensor v'3 tindices -- ^ __a_shape__: 1-D. The `shape` of the `SparseTensor`, with shape `[ndims]`. -> Tensor v'4 t -- ^ __b__: `ndims`-D Tensor. With shape `a_shape`. 
-> Tensor Build t -- ^ __output__ sparseTensorDenseAdd' op'options a_indices a_values a_shape b | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a_indices, buildInputs a_values, buildInputs a_shape, buildInputs b] return (opDef "SparseTensorDenseAdd" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a_indices" description: "2-D. The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`." type_attr: "Tindices" } input_arg { name: "a_values" description: "1-D. The `values` of the `SparseTensor`, with shape `[nnz]`." type_attr: "T" } input_arg { name: "a_shape" description: "1-D. The `shape` of the `SparseTensor`, with shape `[ndims]`." type_attr: "Tindices" } input_arg { name: "b" description: "`ndims`-D Tensor. With shape `a_shape`." type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Multiply SparseTensor (of rank 2) "A" by dense matrix "B". -- -- No validity checking is performed on the indices of A. However, the following -- input format is recommended for optimal behavior: -- -- if adjoint_a == false: -- A should be sorted in lexicographically increasing order. Use SparseReorder -- if you're not sure. -- if adjoint_a == true: -- A should be sorted in order of increasing dimension 1 (i.e., "column major" -- order instead of "row major" order). sparseTensorDenseMatMul :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => Tensor v'1 Data.Int.Int64 -- ^ __a_indices__: 2-D. The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix. -> Tensor v'2 t -- ^ __a_values__: 1-D. The `values` of the `SparseTensor`, size `[nnz]` Vector. -> Tensor v'3 Data.Int.Int64 -- ^ __a_shape__: 1-D. The `shape` of the `SparseTensor`, size `[2]` Vector. -> Tensor v'4 t -- ^ __b__: 2-D. A dense Matrix. -> Tensor Build t -- ^ __product__ sparseTensorDenseMatMul = sparseTensorDenseMatMul' id sparseTensorDenseMatMul' :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __a_indices__: 2-D. The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix. -> Tensor v'2 t -- ^ __a_values__: 1-D. The `values` of the `SparseTensor`, size `[nnz]` Vector. -> Tensor v'3 Data.Int.Int64 -- ^ __a_shape__: 1-D. The `shape` of the `SparseTensor`, size `[2]` Vector. -> Tensor v'4 t -- ^ __b__: 2-D. A dense Matrix. -> Tensor Build t -- ^ __product__ sparseTensorDenseMatMul' op'options a_indices a_values a_shape b | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a_indices, buildInputs a_values, buildInputs a_shape, buildInputs b] return (opDef "SparseTensorDenseMatMul" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a_indices" description: "2-D. The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix." type: DT_INT64 } input_arg { name: "a_values" description: "1-D. The `values` of the `SparseTensor`, size `[nnz]` Vector." type_attr: "T" } input_arg { name: "a_shape" description: "1-D. The `shape` of the `SparseTensor`, size `[2]` Vector." 
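--
-- A usage sketch (same assumptions as the other example comments in this
-- file: the @TensorFlow.Core@/@TensorFlow.Ops@ helpers; `opAttr` comes from
-- @TensorFlow.Build@, which this module itself imports). The `adjoint_a` and
-- `adjoint_b` attrs have defaults, so they are set through the primed
-- variant's `OpParams` when needed:
--
-- > {-# LANGUAGE OverloadedStrings #-}
-- > import Data.Int (Int64)
-- > import Lens.Family2 ((.~))
-- > import TensorFlow.Build (opAttr)
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.GenOps.Core as Ops
-- > import qualified TensorFlow.Ops as TF
-- >
-- > -- A 2x3 sparse matrix with a single non-zero 2.0 at (0,1), times a dense
-- > -- 3x2 matrix B; row 0 of the product is 2 * B[1,:] = [6, 8], row 1 is 0.
-- > matmulDemo :: IO (V.Vector Float)
-- > matmulDemo = TF.runSession $ TF.run $
-- >     Ops.sparseTensorDenseMatMul'
-- >         (opAttr "adjoint_a" .~ False)  -- the default; shown for illustration
-- >         (TF.constant (TF.Shape [1, 2]) [0, 1 :: Int64])
-- >         (TF.vector [2 :: Float])
-- >         (TF.vector [2, 3 :: Int64])
-- >         (TF.constant (TF.Shape [3, 2]) [1, 2, 3, 4, 5, 6 :: Float])
--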
type: DT_INT64 } input_arg { name: "b" description: "2-D. A dense Matrix." type_attr: "T" } output_arg { name: "product" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "adjoint_a" type: "bool" default_value { b: false } description: "Use the adjoint of A in the matrix multiply. If A is complex, this\nis transpose(conj(A)). Otherwise it\'s transpose(A)." } attr { name: "adjoint_b" type: "bool" default_value { b: false } description: "Use the adjoint of B in the matrix multiply. If B is complex, this\nis transpose(conj(B)). Otherwise it\'s transpose(B)." } -} -- | Converts a sparse representation into a dense tensor. -- -- Builds an array `dense` with shape `output_shape` such that -- -- ```prettyprint -- # If sparse_indices is scalar -- dense[i] = (i == sparse_indices ? sparse_values : default_value) -- -- # If sparse_indices is a vector, then for each i -- dense[sparse_indices[i]] = sparse_values[i] -- -- # If sparse_indices is an n by d matrix, then for each i in [0, n) -- dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i] -- ``` -- -- All other values in `dense` are set to `default_value`. If `sparse_values` is a -- scalar, all sparse indices are set to this single value. -- -- Indices should be sorted in lexicographic order, and indices must not -- contain any repeats. If `validate_indices` is true, these properties -- are checked during execution. sparseToDense :: forall v'1 v'2 v'3 v'4 t tindices . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 tindices -- ^ __sparse_indices__: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete -- index where `sparse_values[i]` will be placed. -> Tensor v'2 tindices -- ^ __output_shape__: 1-D. Shape of the dense output tensor. -> Tensor v'3 t -- ^ __sparse_values__: 1-D. Values corresponding to each row of `sparse_indices`, -- or a scalar value to be used for all sparse indices. -> Tensor v'4 t -- ^ __default_value__: Scalar value to set for indices not specified in -- `sparse_indices`. -> Tensor Build t -- ^ __dense__: Dense output tensor of shape `output_shape`. sparseToDense = sparseToDense' id sparseToDense' :: forall v'1 v'2 v'3 v'4 t tindices . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 tindices -- ^ __sparse_indices__: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete -- index where `sparse_values[i]` will be placed. -> Tensor v'2 tindices -- ^ __output_shape__: 1-D. Shape of the dense output tensor. -> Tensor v'3 t -- ^ __sparse_values__: 1-D. Values corresponding to each row of `sparse_indices`, -- or a scalar value to be used for all sparse indices. -> Tensor v'4 t -- ^ __default_value__: Scalar value to set for indices not specified in -- `sparse_indices`. -> Tensor Build t -- ^ __dense__: Dense output tensor of shape `output_shape`. sparseToDense' op'options sparse_indices output_shape sparse_values default_value | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sparse_indices, buildInputs output_shape, buildInputs sparse_values, buildInputs default_value] return (opDef "SparseToDense" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "sparse_indices" description: "0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete\nindex where `sparse_values[i]` will be placed." 
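--
-- A usage sketch mirroring the semantics above (helper modules assumed, as
-- in the other example comments in this file):
--
-- > import Data.Int (Int32)
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.GenOps.Core as Ops
-- > import qualified TensorFlow.Ops as TF
-- >
-- > -- dense[0,1] = 10, dense[2,3] = 20; everything else takes default_value 0.
-- > toDenseDemo :: IO (V.Vector Float)
-- > toDenseDemo = TF.runSession $ TF.run $
-- >     Ops.sparseToDense
-- >         (TF.constant (TF.Shape [2, 2]) [0, 1, 2, 3 :: Int32])  -- sparse_indices
-- >         (TF.vector [3, 4 :: Int32])                            -- output_shape
-- >         (TF.vector [10, 20 :: Float])                          -- sparse_values
-- >         (TF.scalar 0)                                          -- default_value
--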
type_attr: "Tindices" } input_arg { name: "output_shape" description: "1-D. Shape of the dense output tensor." type_attr: "Tindices" } input_arg { name: "sparse_values" description: "1-D. Values corresponding to each row of `sparse_indices`,\nor a scalar value to be used for all sparse indices." type_attr: "T" } input_arg { name: "default_value" description: "Scalar value to set for indices not specified in\n`sparse_indices`." type_attr: "T" } output_arg { name: "dense" description: "Dense output tensor of shape `output_shape`." type_attr: "T" } attr { name: "validate_indices" type: "bool" default_value { b: true } description: "If true, indices are checked to make sure they are sorted in\nlexicographic order and that there are no repeats." } attr { name: "T" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Applies set operation along last dimension of 2 `SparseTensor` inputs. -- -- See SetOperationOp::SetOperationFromContext for values of `set_operation`. -- -- If `validate_indices` is `True`, `SparseToSparseSetOperation` validates the -- order and range of `set1` and `set2` indices. -- -- Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`, -- and `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same -- as `set2`. Dimension `n` contains values in a set, duplicates are allowed but -- ignored. -- -- Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, -- and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same -- as `set1`. Dimension `n` contains values in a set, duplicates are allowed but -- ignored. -- -- If `validate_indices` is `True`, this op validates the order and range of `set1` -- and `set2` indices. -- -- Output `result` is a `SparseTensor` represented by `result_indices`, -- `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this -- has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` -- dimension contains the result of `set_operation` applied to the corresponding -- `[0...n-1]` dimension of `set`. sparseToSparseSetOperation :: forall v'1 v'2 v'3 v'4 v'5 v'6 t . (OneOf '[Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8] t) => Tensor v'1 Data.Int.Int64 -- ^ __set1_indices__: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major -- order. -> Tensor v'2 t -- ^ __set1_values__: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major -- order. -> Tensor v'3 Data.Int.Int64 -- ^ __set1_shape__: 1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must -- be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the -- max set size across `0...n-1` dimensions. -> Tensor v'4 Data.Int.Int64 -- ^ __set2_indices__: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major -- order. -> Tensor v'5 t -- ^ __set2_values__: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major -- order. -> Tensor v'6 Data.Int.Int64 -- ^ __set2_shape__: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must -- be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the -- max set size across `0...n-1` dimensions. -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__result_indices__, __result_values__, __result_shape__) -- -- * __result_indices__: 2D indices of a `SparseTensor`. -- -- * __result_values__: 1D values of a `SparseTensor`. 
-- -- * __result_shape__: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is -- the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]` -- is the max result set size across all `0...n-1` dimensions. sparseToSparseSetOperation = sparseToSparseSetOperation' id sparseToSparseSetOperation' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t . (OneOf '[Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8] t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __set1_indices__: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major -- order. -> Tensor v'2 t -- ^ __set1_values__: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major -- order. -> Tensor v'3 Data.Int.Int64 -- ^ __set1_shape__: 1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must -- be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the -- max set size across `0...n-1` dimensions. -> Tensor v'4 Data.Int.Int64 -- ^ __set2_indices__: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major -- order. -> Tensor v'5 t -- ^ __set2_values__: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major -- order. -> Tensor v'6 Data.Int.Int64 -- ^ __set2_shape__: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must -- be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the -- max set size across `0...n-1` dimensions. -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__result_indices__, __result_values__, __result_shape__) -- -- * __result_indices__: 2D indices of a `SparseTensor`. -- -- * __result_values__: 1D values of a `SparseTensor`. -- -- * __result_shape__: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is -- the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]` -- is the max result set size across all `0...n-1` dimensions. sparseToSparseSetOperation' op'options set1_indices set1_values set1_shape set2_indices set2_values set2_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs set1_indices, buildInputs set1_values, buildInputs set1_shape, buildInputs set2_indices, buildInputs set2_values, buildInputs set2_shape] return (opDef "SparseToSparseSetOperation" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "set1_indices" description: "2D `Tensor`, indices of a `SparseTensor`. Must be in row-major\norder." type: DT_INT64 } input_arg { name: "set1_values" description: "1D `Tensor`, values of a `SparseTensor`. Must be in row-major\norder." type_attr: "T" } input_arg { name: "set1_shape" description: "1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must\nbe the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the\nmax set size across `0...n-1` dimensions." type: DT_INT64 } input_arg { name: "set2_indices" description: "2D `Tensor`, indices of a `SparseTensor`. Must be in row-major\norder." type: DT_INT64 } input_arg { name: "set2_values" description: "1D `Tensor`, values of a `SparseTensor`. Must be in row-major\norder." type_attr: "T" } input_arg { name: "set2_shape" description: "1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must\nbe the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the\nmax set size across `0...n-1` dimensions." type: DT_INT64 } output_arg { name: "result_indices" description: "2D indices of a `SparseTensor`." 
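--
-- Note that the required `set_operation` attr (listed in the op definition
-- below) is not a positional argument of these wrappers, so it must be
-- supplied through `OpParams`. A hedged sketch; the valid attr values
-- ("union", "intersection", "a-b", "b-a") are taken from the TensorFlow
-- documentation, and the helper modules are assumptions as in the other
-- example comments:
--
-- > {-# LANGUAGE OverloadedStrings #-}
-- > import Data.ByteString (ByteString)
-- > import Data.Int (Int64)
-- > import Lens.Family2 ((.~))
-- > import TensorFlow.Build (opAttr)
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.GenOps.Core as Ops
-- > import qualified TensorFlow.Ops as TF
-- >
-- > -- Two sets along the last dimension: {1,2} and {2,3}; the intersection
-- > -- leaves a single result value, 2.
-- > setDemo :: IO (V.Vector Int64, V.Vector Int64, V.Vector Int64)
-- > setDemo = TF.runSession $ TF.run $
-- >     Ops.sparseToSparseSetOperation'
-- >         (opAttr "set_operation" .~ ("intersection" :: ByteString))
-- >         (TF.constant (TF.Shape [2, 2]) [0, 0, 0, 1 :: Int64])  -- set1_indices
-- >         (TF.vector [1, 2 :: Int64])                            -- set1_values
-- >         (TF.vector [1, 2 :: Int64])                            -- set1_shape
-- >         (TF.constant (TF.Shape [2, 2]) [0, 0, 0, 1 :: Int64])  -- set2_indices
-- >         (TF.vector [2, 3 :: Int64])                            -- set2_values
-- >         (TF.vector [1, 2 :: Int64])                            -- set2_shape
--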
type: DT_INT64 } output_arg { name: "result_values" description: "1D values of a `SparseTensor`." type_attr: "T" } output_arg { name: "result_shape" description: "1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is\nthe same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`\nis the max result set size across all `0...n-1` dimensions." type: DT_INT64 } attr { name: "set_operation" type: "string" } attr { name: "validate_indices" type: "bool" default_value { b: true } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_UINT16 type: DT_STRING } } } -} -- | Splits a tensor into `num_split` tensors along one dimension. split :: forall v'1 v'2 t . (TensorType t) => Data.Int.Int64 -- ^ __num_split__: The number of ways to split. Must evenly divide -- `value.shape[split_dim]`. -> Tensor v'1 Data.Int.Int32 -- ^ __split_dim__: 0-D. The dimension along which to split. Must be in the range -- `[0, rank(value))`. -> Tensor v'2 t -- ^ __value__: The tensor to split. -> [Tensor Build t] -- ^ __output__: They are identically shaped tensors, whose shape matches that of `value` -- except along `split_dim`, where their sizes are -- `values.shape[split_dim] / num_split`. split = split' id split' :: forall v'1 v'2 t . (TensorType t) => OpParams -> Data.Int.Int64 -- ^ __num_split__: The number of ways to split. Must evenly divide -- `value.shape[split_dim]`. -> Tensor v'1 Data.Int.Int32 -- ^ __split_dim__: 0-D. The dimension along which to split. Must be in the range -- `[0, rank(value))`. -> Tensor v'2 t -- ^ __value__: The tensor to split. -> [Tensor Build t] -- ^ __output__: They are identically shaped tensors, whose shape matches that of `value` -- except along `split_dim`, where their sizes are -- `values.shape[split_dim] / num_split`. split' op'options num_split split_dim value | eqLengthGuard [] = pureOp [num_split] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs split_dim, buildInputs value] return (opDef "Split" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "num_split" .~ num_split & op'options & opInputs .~ op'inputs) {- input_arg { name: "split_dim" description: "0-D. The dimension along which to split. Must be in the range\n`[0, rank(value))`." type: DT_INT32 } input_arg { name: "value" description: "The tensor to split." type_attr: "T" } output_arg { name: "output" description: "They are identically shaped tensors, whose shape matches that of `value`\nexcept along `split_dim`, where their sizes are\n`values.shape[split_dim] / num_split`." type_attr: "T" number_attr: "num_split" } attr { name: "num_split" type: "int" description: "The number of ways to split. Must evenly divide\n`value.shape[split_dim]`." has_minimum: true minimum: 1 } attr { name: "T" type: "type" } -} -- | Splits a tensor into `num_split` tensors along one dimension. splitV :: forall v'1 v'2 v'3 t tlen . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tlen) => Data.Int.Int64 -- ^ __num_split__ -> Tensor v'1 t -- ^ __value__: The tensor to split. -> Tensor v'2 tlen -- ^ __size_splits__: list containing the sizes of each output tensor along the split -- dimension. Must sum to the dimension of value along split_dim. -- Can contain one -1 indicating that dimension is to be inferred. -> Tensor v'3 Data.Int.Int32 -- ^ __split_dim__: 0-D. The dimension along which to split. Must be in the range -- `[0, rank(value))`. 
-> [Tensor Build t] -- ^ __output__: Tensors whose shape matches that of `value` -- except along `split_dim`, where their sizes are -- `size_splits[i]`. splitV = splitV' id splitV' :: forall v'1 v'2 v'3 t tlen . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tlen) => OpParams -> Data.Int.Int64 -- ^ __num_split__ -> Tensor v'1 t -- ^ __value__: The tensor to split. -> Tensor v'2 tlen -- ^ __size_splits__: list containing the sizes of each output tensor along the split -- dimension. Must sum to the dimension of value along split_dim. -- Can contain one -1 indicating that dimension is to be inferred. -> Tensor v'3 Data.Int.Int32 -- ^ __split_dim__: 0-D. The dimension along which to split. Must be in the range -- `[0, rank(value))`. -> [Tensor Build t] -- ^ __output__: Tensors whose shape matches that of `value` -- except along `split_dim`, where their sizes are -- `size_splits[i]`. splitV' op'options num_split value size_splits split_dim | eqLengthGuard [] = pureOp [num_split] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value, buildInputs size_splits, buildInputs split_dim] return (opDef "SplitV" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tlen" .~ tensorType (undefined :: tlen) & opAttr "num_split" .~ num_split & op'options & opInputs .~ op'inputs) {- input_arg { name: "value" description: "The tensor to split." type_attr: "T" } input_arg { name: "size_splits" description: "list containing the sizes of each output tensor along the split\ndimension. Must sum to the dimension of value along split_dim.\nCan contain one -1 indicating that dimension is to be inferred." type_attr: "Tlen" } input_arg { name: "split_dim" description: "0-D. The dimension along which to split. Must be in the range\n`[0, rank(value))`." type: DT_INT32 } output_arg { name: "output" description: "Tensors whose shape matches that of `value`\nexcept along `split_dim`, where their sizes are\n`size_splits[i]`." type_attr: "T" number_attr: "num_split" } attr { name: "num_split" type: "int" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } attr { name: "Tlen" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Computes square root of x element-wise. -- -- I.e., \\(y = \sqrt{x} = x^{1/2}\\). sqrt :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ sqrt = sqrt' id sqrt' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ sqrt' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Sqrt" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Computes the gradient for the sqrt of `x` wrt its input. -- -- Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy` -- is the corresponding input gradient. sqrtGrad :: forall v'1 v'2 t . 
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ sqrtGrad = sqrtGrad' id sqrtGrad' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ sqrtGrad' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "SqrtGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Computes square of x element-wise. -- -- I.e., \\(y = x * x = x^2\\). square :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ square = square' id square' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ square' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Square" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Returns (x - y)(x - y) element-wise. -- -- *NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) squaredDifference :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ squaredDifference = squaredDifference' id squaredDifference' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ squaredDifference' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "SquaredDifference" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Removes dimensions of size 1 from the shape of a tensor. -- -- Given a tensor `input`, this operation returns a tensor of the same type with -- all dimensions of size 1 removed. 
If you don't want to remove all size 1 -- dimensions, you can remove specific size 1 dimensions by specifying -- `squeeze_dims`. -- -- For example: -- -- ```prettyprint -- # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] -- shape(squeeze(t)) ==> [2, 3] -- ``` -- -- Or, to remove specific size 1 dimensions: -- -- ```prettyprint -- # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] -- shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1] -- ``` squeeze :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __input__: The `input` to squeeze. -> Tensor Build t -- ^ __output__: Contains the same data as `input`, but has one or more dimensions of -- size 1 removed. squeeze = squeeze' id squeeze' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__: The `input` to squeeze. -> Tensor Build t -- ^ __output__: Contains the same data as `input`, but has one or more dimensions of -- size 1 removed. squeeze' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Squeeze" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "The `input` to squeeze." type_attr: "T" } output_arg { name: "output" description: "Contains the same data as `input`, but has one or more dimensions of\nsize 1 removed." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "squeeze_dims" type: "list(int)" default_value { list { } } description: "If specified, only squeezes the dimensions listed. The dimension\nindex starts at 0. It is an error to squeeze a dimension that is not 1." has_minimum: true } -} -- | A stack that produces elements in first-in last-out order. stack :: forall m' . (MonadBuild m') => DataType -- ^ __elem_type__: The type of the elements on the stack. -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the stack. stack = stack' id stack' :: forall m' . (MonadBuild m') => OpParams -> DataType -- ^ __elem_type__: The type of the elements on the stack. -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__: The handle to the stack. stack' op'options elem_type | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "Stack" & opAttr "elem_type" .~ elem_type & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" description: "The handle to the stack." type: DT_STRING is_ref: true } attr { name: "elem_type" type: "type" description: "The type of the elements on the stack." } attr { name: "stack_name" type: "string" default_value { s: "" } description: "Overrides the name used for the temporary stack resource. Default\nvalue is the name of the \'Stack\' op (which is guaranteed unique)." } -} -- | Delete the stack from its resource container. stackClose :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a stack. -> m' (ControlNode) stackClose = stackClose' id stackClose' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a stack. -> m' (ControlNode) stackClose' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "StackClose" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a stack." type: DT_STRING is_ref: true } -} -- | Pop the element at the top of the stack. stackPop :: forall elem_type m' . 
(MonadBuild m', TensorType elem_type) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a stack. -> m' (Tensor Value elem_type) -- ^ __elem__: The tensor that is popped from the top of the stack. stackPop = stackPop' id stackPop' :: forall elem_type m' . (MonadBuild m', TensorType elem_type) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a stack. -> m' (Tensor Value elem_type) -- ^ __elem__: The tensor that is popped from the top of the stack. stackPop' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "StackPop" & opAttr "elem_type" .~ tensorType (undefined :: elem_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a stack." type: DT_STRING is_ref: true } output_arg { name: "elem" description: "The tensor that is popped from the top of the stack." type_attr: "elem_type" } attr { name: "elem_type" type: "type" description: "The type of the elem that is popped." } -} -- | Push an element onto the stack. stackPush :: forall v'2 t m' . (MonadBuild m', TensorType t) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a stack. -> Tensor v'2 t -- ^ __elem__: The tensor to be pushed onto the stack. -> m' (Tensor Value t) -- ^ __output__: The same tensor as the input 'elem'. stackPush = stackPush' id stackPush' :: forall v'2 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__: The handle to a stack. -> Tensor v'2 t -- ^ __elem__: The tensor to be pushed onto the stack. -> m' (Tensor Value t) -- ^ __output__: The same tensor as the input 'elem'. stackPush' op'options handle elem | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs elem] buildOp [] (opDef "StackPush" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a stack." type: DT_STRING is_ref: true } input_arg { name: "elem" description: "The tensor to be pushed onto the stack." type_attr: "T" } output_arg { name: "output" description: "The same tensor as the input \'elem\'." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "swap_memory" type: "bool" default_value { b: false } description: "Swap `elem` to CPU. Default to false." } -} -- | Stage values similar to a lightweight Enqueue. The basic functionality of this -- -- Op is similar to a queue with many fewer capabilities and options. This Op is -- optimized for performance. stage :: forall v'1 dtypes m' . (MonadBuild m', TensorTypes dtypes) => TensorList (v'1) dtypes -- ^ __values__: a list of tensors -> m' (ControlNode) stage = stage' id stage' :: forall v'1 dtypes m' . (MonadBuild m', TensorTypes dtypes) => OpParams -> TensorList (v'1) dtypes -- ^ __values__: a list of tensors -> m' (ControlNode) stage' op'options values | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs values] buildOp [] (opDef "Stage" & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes) & op'options & opInputs .~ op'inputs) {- input_arg { name: "values" description: "a list of tensors" type_list_attr: "dtypes" } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this queue is placed in the given container. 
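--
-- Returning to the stack ops above, a hedged sketch of push/pop sequencing.
-- Beyond the usual assumed helper modules, it assumes
-- `withControlDependencies` from the tensorflow package (to force the pushes
-- to run before the pop, since the pop's only data input is the handle) and
-- the proto-generated `DT_FLOAT` constructor (import path assumed from the
-- tensorflow-proto package):
--
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.GenOps.Core as Ops
-- > import qualified TensorFlow.Ops as TF
-- > import Proto.Tensorflow.Core.Framework.Types (DataType(DT_FLOAT))
-- >
-- > -- First-in last-out: push 1.0 then 2.0; the pop yields 2.0.
-- > stackDemo :: IO (V.Vector Float)
-- > stackDemo = TF.runSession $ do
-- >     h  <- Ops.stack DT_FLOAT
-- >     p1 <- Ops.stackPush h (TF.scalar (1 :: Float))
-- >     p2 <- TF.withControlDependencies p1 $ Ops.stackPush h (TF.scalar (2 :: Float))
-- >     popped <- TF.withControlDependencies p2 $ Ops.stackPop h
-- >     TF.run popped
--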
Otherwise,\na default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "It is necessary to match this name to the matching Unstage Op." } -} -- | Stops gradient computation. -- -- When executed in a graph, this op outputs its input tensor as-is. -- -- When building ops to compute gradients, this op prevents the contribution of -- its inputs from being taken into account. Normally, the gradient generator adds ops -- to a graph to compute the derivatives of a specified 'loss' by recursively -- finding the inputs that contributed to its computation. If you insert this op -- in the graph, its inputs are masked from the gradient generator. They are not -- taken into account for computing gradients. -- -- This is useful any time you want to compute a value with TensorFlow but need -- to pretend that the value was a constant. Some examples include: -- -- * The *EM* algorithm where the *M-step* should not involve backpropagation -- through the output of the *E-step*. -- * Contrastive divergence training of Boltzmann machines where, when -- differentiating the energy function, the training must not backpropagate -- through the graph that generated the samples from the model. -- * Adversarial training, where no backprop should happen through the adversarial -- example generation process. stopGradient :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ stopGradient = stopGradient' id stopGradient' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ stopGradient' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "StopGradient" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } -} -- | Return a strided slice from `input`. -- -- Note: most Python users will want to use the Python `Tensor.__getitem__` -- or `Variable.__getitem__` rather than this op directly. -- -- The goal of this op is to produce a new tensor with a subset of -- the elements from the `n` dimensional `input` tensor. The subset is chosen using -- a sequence of `m` sparse range specifications encoded into the arguments -- of this function. Note that in some cases -- `m` could be equal to `n`, but this need not be the case. Each -- range specification entry can be one of the following: -- -- - An ellipsis (...). Ellipses are used to imply zero or more -- dimensions of full-dimension selection and are produced using -- `ellipsis_mask`. For example, `foo[...]` is the identity slice. -- -- - A new axis. This is used to insert a new shape=1 dimension and is -- produced using `new_axis_mask`. For example, `foo[tf.newaxis, ...]` where -- `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor. -- -- -- - A range `begin:end:stride`. This is used to specify how much to choose from -- a given dimension. `stride` can be any integer but 0. `begin` is an integer -- which represents the index of the first value to select while `end` represents -- the index of the first value not to select. The number of values selected in each -- dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`. -- `begin` and `end` can be negative where `-1` is the last element, `-2` is -- the second to last. 
`begin_mask` controls whether to replace the explicitly -- given `begin` with an implicit effective value of `0` if `stride > 0` and -- `-1` if `stride < 0`. `end_mask` is analogous but produces the number -- required to create the largest open interval. For example, given a shape -- `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do -- not assume this is equivalent to `foo[0:-1]` which has an effective `begin` -- and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the -- first dimension of a tensor while dropping the last two elements (in the -- original order). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`. -- -- - A single index. This is used to keep only elements that have a given -- index. For example, `foo[2, :]` on a shape `(5,6)` tensor produces a -- shape `(6,)` tensor. This is encoded in `begin` and `end` and -- `shrink_axis_mask`. -- -- Each conceptual range specification is encoded in the op's arguments. This -- encoding is best understood by considering a non-trivial example. In -- particular, -- `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as -- -- ```prettyprint -- begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0) -- end = [2, 4, x, x, -3, x] -- strides = [1, 1, x, x, -1, 1] -- begin_mask = 1<<4 | 1 << 5 = 48 -- end_mask = 1<<5 = 32 -- ellipsis_mask = 1<<3 = 8 -- new_axis_mask = 1<<2 = 4 -- shrink_axis_mask = 1<<0 = 1 -- ``` -- -- In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of -- the slice becomes (2, 1, 5, 5, 2, 5). -- Let us walk step by step through each argument specification. -- -- 1. The first argument in the example slice is turned into `begin = 1` and -- `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we -- also set the appropriate bit in `shrink_axis_mask`. -- -- 2. `2:4` contributes 2, 4, 1 to begin, end, and stride. All masks have -- zero bits contributed. -- -- 3. None is a synonym for `tf.newaxis`. This means a dimension of size 1 -- is inserted in the final shape. Dummy values are contributed to begin, -- end and stride, while the new_axis_mask bit is set. -- -- 4. `...` grabs the full ranges from as many dimensions as needed to -- fully specify a slice for every dimension of the input shape. -- -- 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated -- with a dimension that has shape `s` is converted to a positive index -- `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion -- is done internally so begin, end and strides receive x, -3, and -1. -- The appropriate begin_mask bit is set to indicate the start range is the -- full range (ignoring the x). -- -- 6. `:` indicates that the entire contents of the corresponding dimension -- is selected. This is equivalent to `::` or `0::1`. begin, end, and strides -- receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and -- `end_mask` are also set. -- -- *Requirements*: -- `0 != strides[i] for i in [0, m)` -- `ellipsis_mask must be a power of two (only one ellipsis)` stridedSlice :: forall v'1 v'2 v'3 v'4 t index . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] index) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 index -- ^ __begin__: `begin[k]` specifies the offset into the `k`th range specification. -- The exact dimension this corresponds to will be determined by context. -- Out-of-bounds values will be silently clamped. 
If the `k`th bit of -- `begin_mask` is set then `begin[k]` is ignored and the full range of the -- appropriate dimension is used instead. Negative values cause indexing -- to start from the highest element, e.g. if `foo==[1,2,3]` then `foo[-1]==3`. -> Tensor v'3 index -- ^ __end__: `end[i]` is like `begin` with the exception that `end_mask` is -- used to determine full ranges. -> Tensor v'4 index -- ^ __strides__: `strides[i]` specifies the increment in the `i`th specification -- after extracting a given element. Negative indices will reverse -- the original order. Out-of-range values are -- clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0` -> Tensor Build t -- ^ __output__ stridedSlice = stridedSlice' id stridedSlice' :: forall v'1 v'2 v'3 v'4 t index . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] index) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 index -- ^ __begin__: `begin[k]` specifies the offset into the `k`th range specification. -- The exact dimension this corresponds to will be determined by context. -- Out-of-bounds values will be silently clamped. If the `k`th bit of -- `begin_mask` is set then `begin[k]` is ignored and the full range of the -- appropriate dimension is used instead. Negative values cause indexing -- to start from the highest element, e.g. if `foo==[1,2,3]` then `foo[-1]==3`. -> Tensor v'3 index -- ^ __end__: `end[i]` is like `begin` with the exception that `end_mask` is -- used to determine full ranges. -> Tensor v'4 index -- ^ __strides__: `strides[i]` specifies the increment in the `i`th specification -- after extracting a given element. Negative indices will reverse -- the original order. Out-of-range values are -- clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0` -> Tensor Build t -- ^ __output__ stridedSlice' op'options input begin end strides | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs begin, buildInputs end, buildInputs strides] return (opDef "StridedSlice" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Index" .~ tensorType (undefined :: index) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "begin" description: "`begin[k]` specifies the offset into the `k`th range specification.\nThe exact dimension this corresponds to will be determined by context.\nOut-of-bounds values will be silently clamped. If the `k`th bit of\n`begin_mask` then `begin[k]` is ignored and the full range of the\nappropriate dimension is used instead. Negative values causes indexing\nto start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`." type_attr: "Index" } input_arg { name: "end" description: "`end[i]` is like `begin` with the exception that `end_mask` is\nused to determine full ranges." type_attr: "Index" } input_arg { name: "strides" description: "`strides[i]` specifies the increment in the `i`th specification\nafter extracting a given element. Negative indices will reverse\nthe original order. Out or range values are\nclamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`" type_attr: "Index" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Index" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "begin_mask" type: "int" default_value { i: 0 } description: "a bitmask where a bit i being 1 means to ignore the begin\nvalue and instead use the largest interval possible. 
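--
-- A usage sketch tying the encoding described above to the wrapper (assumed
-- helper modules as in the other example comments; the masks are attrs with
-- default 0, so they go through the primed variant's `OpParams`):
--
-- > {-# LANGUAGE OverloadedStrings #-}
-- > import Data.Int (Int32, Int64)
-- > import Lens.Family2 ((.~))
-- > import TensorFlow.Build (opAttr)
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.Core as TF
-- > import qualified TensorFlow.GenOps.Core as Ops
-- > import qualified TensorFlow.Ops as TF
-- >
-- > -- foo[1:3] on a 1-D tensor: begin = [1], end = [3], strides = [1],
-- > -- and an (explicitly default) begin_mask of 0; yields [11, 12].
-- > sliceDemo :: IO (V.Vector Float)
-- > sliceDemo = TF.runSession $ TF.run $
-- >     Ops.stridedSlice'
-- >         (opAttr "begin_mask" .~ (0 :: Int64))
-- >         (TF.vector [10, 11, 12, 13, 14 :: Float])
-- >         (TF.vector [1 :: Int32])
-- >         (TF.vector [3 :: Int32])
-- >         (TF.vector [1 :: Int32])
--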
At runtime\nbegin[i] will be replaced with `[0, n-1) if `stride[i] > 0` or\n`[-1, n-1]` if `stride[i] < 0`" } attr { name: "end_mask" type: "int" default_value { i: 0 } description: "analogous to `begin_mask`" } attr { name: "ellipsis_mask" type: "int" default_value { i: 0 } description: "a bitmask where bit `i` being 1 means the `i`th\nposition is actually an ellipsis. One bit at most can be 1.\nIf `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)`\nis provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis\nimplicitly creates as many range specifications as necessary to fully\nspecify the sliced range for every dimension. For example for a 4-dimensional\ntensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`." } attr { name: "new_axis_mask" type: "int" default_value { i: 0 } description: "a bitmask where bit `i` being 1 means the `i`th\nspecification creates a new shape 1 dimension. For example\n`foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor." } attr { name: "shrink_axis_mask" type: "int" default_value { i: 0 } description: "a bitmask where bit `i` implies that the `i`th\nspecification should shrink the dimensionality. begin and end\nmust imply a slice of size 1 in the dimension. For example in\npython one might do `foo[:, 3, :]` which would result in\n`shrink_axis_mask` being 2." } -} -- | Assign `value` to the sliced l-value reference of `ref`. -- -- The values of `value` are assigned to the positions in the variable -- `ref` that are selected by the slice parameters. The slice parameters -- `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`. -- -- NOTE: this op currently does not support broadcasting and so `value`'s -- shape must be exactly the shape produced by the slice of `ref`. stridedSliceAssign :: forall v'2 v'3 v'4 v'5 t index m' . (MonadBuild m', TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] index) => Tensor Ref t -- ^ __ref__ -> Tensor v'2 index -- ^ __begin__ -> Tensor v'3 index -- ^ __end__ -> Tensor v'4 index -- ^ __strides__ -> Tensor v'5 t -- ^ __value__ -> m' (Tensor Ref t) -- ^ __output_ref__ stridedSliceAssign = stridedSliceAssign' id stridedSliceAssign' :: forall v'2 v'3 v'4 v'5 t index m' . 
(MonadBuild m', TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] index) => OpParams -> Tensor Ref t -- ^ __ref__ -> Tensor v'2 index -- ^ __begin__ -> Tensor v'3 index -- ^ __end__ -> Tensor v'4 index -- ^ __strides__ -> Tensor v'5 t -- ^ __value__ -> m' (Tensor Ref t) -- ^ __output_ref__ stridedSliceAssign' op'options ref begin end strides value | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs begin, buildInputs end, buildInputs strides, buildInputs value] buildOp [] (opDef "StridedSliceAssign" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Index" .~ tensorType (undefined :: index) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" type_attr: "T" is_ref: true } input_arg { name: "begin" type_attr: "Index" } input_arg { name: "end" type_attr: "Index" } input_arg { name: "strides" type_attr: "Index" } input_arg { name: "value" type_attr: "T" } output_arg { name: "output_ref" type_attr: "T" is_ref: true } attr { name: "T" type: "type" } attr { name: "Index" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "begin_mask" type: "int" default_value { i: 0 } } attr { name: "end_mask" type: "int" default_value { i: 0 } } attr { name: "ellipsis_mask" type: "int" default_value { i: 0 } } attr { name: "new_axis_mask" type: "int" default_value { i: 0 } } attr { name: "shrink_axis_mask" type: "int" default_value { i: 0 } } -} -- | Returns the gradient of `StridedSlice`. -- -- Since `StridedSlice` cuts out pieces of its `input` which is of size -- `shape`, its gradient will have the same shape (which is passed here -- as `shape`). The gradient will be zero in any element that the slice -- does not select. -- -- Arguments are the same as `StridedSlice` with the exception that -- `dy` is the input gradient to be propagated and `shape` is the -- shape of `StridedSlice`'s `input`. stridedSliceGrad :: forall v'1 v'2 v'3 v'4 v'5 t index . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] index) => Tensor v'1 index -- ^ __shape__ -> Tensor v'2 index -- ^ __begin__ -> Tensor v'3 index -- ^ __end__ -> Tensor v'4 index -- ^ __strides__ -> Tensor v'5 t -- ^ __dy__ -> Tensor Build t -- ^ __output__ stridedSliceGrad = stridedSliceGrad' id stridedSliceGrad' :: forall v'1 v'2 v'3 v'4 v'5 t index . 
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] index) => OpParams -> Tensor v'1 index -- ^ __shape__ -> Tensor v'2 index -- ^ __begin__ -> Tensor v'3 index -- ^ __end__ -> Tensor v'4 index -- ^ __strides__ -> Tensor v'5 t -- ^ __dy__ -> Tensor Build t -- ^ __output__ stridedSliceGrad' op'options shape begin end strides dy | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape, buildInputs begin, buildInputs end, buildInputs strides, buildInputs dy] return (opDef "StridedSliceGrad" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Index" .~ tensorType (undefined :: index) & op'options & opInputs .~ op'inputs) {- input_arg { name: "shape" type_attr: "Index" } input_arg { name: "begin" type_attr: "Index" } input_arg { name: "end" type_attr: "Index" } input_arg { name: "strides" type_attr: "Index" } input_arg { name: "dy" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Index" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "begin_mask" type: "int" default_value { i: 0 } } attr { name: "end_mask" type: "int" default_value { i: 0 } } attr { name: "ellipsis_mask" type: "int" default_value { i: 0 } } attr { name: "new_axis_mask" type: "int" default_value { i: 0 } } attr { name: "shrink_axis_mask" type: "int" default_value { i: 0 } } -} -- | Joins the strings in the given list of string tensors into one tensor; -- -- with the given separator (default is an empty separator). stringJoin :: [Tensor v'1 Data.ByteString.ByteString] -- ^ __inputs__: A list of string tensors. The tensors must all have the same shape, -- or be scalars. Scalars may be mixed in; these will be broadcast to the shape -- of non-scalar inputs. -> Tensor Build Data.ByteString.ByteString -- ^ __output__ stringJoin = stringJoin' id stringJoin' :: OpParams -> [Tensor v'1 Data.ByteString.ByteString] -- ^ __inputs__: A list of string tensors. The tensors must all have the same shape, -- or be scalars. Scalars may be mixed in; these will be broadcast to the shape -- of non-scalar inputs. -> Tensor Build Data.ByteString.ByteString -- ^ __output__ stringJoin' op'options inputs | eqLengthGuard [("N", [("inputs", length inputs)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs] return (opDef "StringJoin" & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length inputs) :: Int64 {- input_arg { name: "inputs" description: "A list of string tensors. The tensors must all have the same shape,\nor be scalars. Scalars may be mixed in; these will be broadcast to the shape\nof non-scalar inputs." type: DT_STRING number_attr: "N" } output_arg { name: "output" type: DT_STRING } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "separator" type: "string" default_value { s: "" } description: "string, an optional join separator." } -} -- | Split elements of `input` based on `delimiter` into a `SparseTensor`. -- -- Let N be the size of the source (typically N will be the batch size). Split each -- element of `input` based on `delimiter` and return a `SparseTensor` -- containing the split tokens. Empty tokens are ignored. -- -- `delimiter` can be empty, or a string of split characters. If `delimiter` is an -- empty string, each element of `input` is split into individual single-byte -- character strings, including splitting of UTF-8 multibyte sequences. 
Otherwise -- every character of `delimiter` is a potential split point. -- -- For example: -- N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output -- will be -- -- indices = [0, 0; -- 0, 1; -- 1, 0; -- 1, 1; -- 1, 2] -- shape = [2, 3] -- values = ['hello', 'world', 'a', 'b', 'c'] stringSplit :: Tensor v'1 Data.ByteString.ByteString -- ^ __input__: 1-D. Strings to split. -> Tensor v'2 Data.ByteString.ByteString -- ^ __delimiter__: 0-D. Delimiter characters (bytes), or empty string. -> (Tensor Build Data.Int.Int64, Tensor Build Data.ByteString.ByteString, Tensor Build Data.Int.Int64) -- ^ (__indices__, __values__, __shape__) -- -- * __indices__: A dense matrix of int64 representing the indices of the sparse tensor. -- -- * __values__: A vector of strings corresponding to the split values. -- -- * __shape__: a length-2 vector of int64 representing the shape of the sparse -- tensor, where the first value is N and the second value is the maximum number -- of tokens in a single input entry. stringSplit = stringSplit' id stringSplit' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__: 1-D. Strings to split. -> Tensor v'2 Data.ByteString.ByteString -- ^ __delimiter__: 0-D. Delimiter characters (bytes), or empty string. -> (Tensor Build Data.Int.Int64, Tensor Build Data.ByteString.ByteString, Tensor Build Data.Int.Int64) -- ^ (__indices__, __values__, __shape__) -- -- * __indices__: A dense matrix of int64 representing the indices of the sparse tensor. -- -- * __values__: A vector of strings corresponding to the split values. -- -- * __shape__: a length-2 vector of int64 representing the shape of the sparse -- tensor, where the first value is N and the second value is the maximum number -- of tokens in a single input entry. stringSplit' op'options input delimiter | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs delimiter] return (opDef "StringSplit" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "1-D. Strings to split." type: DT_STRING } input_arg { name: "delimiter" description: "0-D. Delimiter characters (bytes), or empty string." type: DT_STRING } output_arg { name: "indices" description: "A dense matrix of int64 representing the indices of the sparse tensor." type: DT_INT64 } output_arg { name: "values" description: "A vector of strings corresponding to the split values." type: DT_STRING } output_arg { name: "shape" description: "a length-2 vector of int64 representing the shape of the sparse\ntensor, where the first value is N and the second value is the maximum number\nof tokens in a single input entry." type: DT_INT64 } -} -- | Converts each string in the input Tensor to its hash mod by a number of buckets. -- -- The hash function is deterministic on the content of the string within the -- process. -- -- Note that the hash function may change from time to time. -- This functionality will be deprecated and it's recommended to use -- `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`. stringToHashBucket :: Data.Int.Int64 -- ^ __num_buckets__: The number of buckets. -> Tensor v'1 Data.ByteString.ByteString -- ^ __string_tensor__ -> Tensor Build Data.Int.Int64 -- ^ __output__: A Tensor of the same shape as the input `string_tensor`. stringToHashBucket = stringToHashBucket' id stringToHashBucket' :: OpParams -> Data.Int.Int64 -- ^ __num_buckets__: The number of buckets. 
-> Tensor v'1 Data.ByteString.ByteString -- ^ __string_tensor__ -> Tensor Build Data.Int.Int64 -- ^ __output__: A Tensor of the same shape as the input `string_tensor`. stringToHashBucket' op'options num_buckets string_tensor | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs string_tensor] return (opDef "StringToHashBucket" & opAttr "num_buckets" .~ num_buckets & op'options & opInputs .~ op'inputs) {- input_arg { name: "string_tensor" type: DT_STRING } output_arg { name: "output" description: "A Tensor of the same shape as the input `string_tensor`." type: DT_INT64 } attr { name: "num_buckets" type: "int" description: "The number of buckets." has_minimum: true minimum: 1 } -} -- | Converts each string in the input Tensor to its hash mod by a number of buckets. -- -- The hash function is deterministic on the content of the string within the -- process and will never change. However, it is not suitable for cryptography. -- This function may be used when CPU time is scarce and inputs are trusted or -- unimportant. There is a risk of adversaries constructing inputs that all hash -- to the same bucket. To prevent this problem, use a strong hash function with -- `tf.string_to_hash_bucket_strong`. stringToHashBucketFast :: Data.Int.Int64 -- ^ __num_buckets__: The number of buckets. -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__: The strings to assign a hash bucket. -> Tensor Build Data.Int.Int64 -- ^ __output__: A Tensor of the same shape as the input `string_tensor`. stringToHashBucketFast = stringToHashBucketFast' id stringToHashBucketFast' :: OpParams -> Data.Int.Int64 -- ^ __num_buckets__: The number of buckets. -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__: The strings to assign a hash bucket. -> Tensor Build Data.Int.Int64 -- ^ __output__: A Tensor of the same shape as the input `string_tensor`. stringToHashBucketFast' op'options num_buckets input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "StringToHashBucketFast" & opAttr "num_buckets" .~ num_buckets & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "The strings to assign a hash bucket." type: DT_STRING } output_arg { name: "output" description: "A Tensor of the same shape as the input `string_tensor`." type: DT_INT64 } attr { name: "num_buckets" type: "int" description: "The number of buckets." has_minimum: true minimum: 1 } -} -- | Converts each string in the input Tensor to its hash mod by a number of buckets. -- -- The hash function is deterministic on the content of the string within the -- process. The hash function is a keyed hash function, where attribute `key` -- defines the key of the hash function. `key` is an array of 2 elements. -- -- A strong hash is important when inputs may be malicious, e.g. URLs with -- additional components. Adversaries could try to make their inputs hash to the -- same bucket for a denial-of-service attack or to skew the results. A strong -- hash prevents this by making it difficult, if not infeasible, to compute inputs -- that hash to the same bucket. This comes at a cost of roughly 4x higher compute -- time than `tf.string_to_hash_bucket_fast`. stringToHashBucketStrong :: Data.Int.Int64 -- ^ __num_buckets__: The number of buckets. -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__: The strings to assign a hash bucket. -> Tensor Build Data.Int.Int64 -- ^ __output__: A Tensor of the same shape as the input `string_tensor`. 
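-- Because the required `key` attr has no default and no positional
-- argument here, it has to be supplied through `OpParams`. A sketch
-- (the key values below are placeholders; this assumes the `Attribute`
-- instance for `[Data.Int.Int64]` used elsewhere for list(int) attrs):
--
-- > {-# LANGUAGE OverloadedStrings #-}
-- > import Data.Int (Int64)
-- > import Lens.Family2 ((.~))
-- > import TensorFlow.Build (opAttr)
-- > import qualified TensorFlow.GenOps.Core as Ops
-- > import qualified TensorFlow.Ops as TF
-- > import TensorFlow.Types (Shape (..))
-- >
-- > -- Hash two strings into 100 buckets with an explicit 2-element key.
-- > buckets = Ops.stringToHashBucketStrong'
-- >             (opAttr "key" .~ [0xfeed, 0xbeef :: Int64])
-- >             100
-- >             (TF.constant (Shape [2]) ["hello", "world"])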
stringToHashBucketStrong = stringToHashBucketStrong' id stringToHashBucketStrong' :: OpParams -> Data.Int.Int64 -- ^ __num_buckets__: The number of buckets. -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__: The strings to assign a hash bucket. -> Tensor Build Data.Int.Int64 -- ^ __output__: A Tensor of the same shape as the input `string_tensor`. stringToHashBucketStrong' op'options num_buckets input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "StringToHashBucketStrong" & opAttr "num_buckets" .~ num_buckets & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "The strings to assign a hash bucket." type: DT_STRING } output_arg { name: "output" description: "A Tensor of the same shape as the input `string_tensor`." type: DT_INT64 } attr { name: "num_buckets" type: "int" description: "The number of buckets." has_minimum: true minimum: 1 } attr { name: "key" type: "list(int)" description: "The key for the keyed hash function passed as a list of two uint64\nelements." } -} -- | Converts each string in the input Tensor to the specified numeric type. -- -- (Note that int32 overflow results in an error while float overflow -- results in a rounded value.) stringToNumber :: forall v'1 out_type . (OneOf '[Data.Int.Int32, Float] out_type) => Tensor v'1 Data.ByteString.ByteString -- ^ __string_tensor__ -> Tensor Build out_type -- ^ __output__: A Tensor of the same shape as the input `string_tensor`. stringToNumber = stringToNumber' id stringToNumber' :: forall v'1 out_type . (OneOf '[Data.Int.Int32, Float] out_type) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __string_tensor__ -> Tensor Build out_type -- ^ __output__: A Tensor of the same shape as the input `string_tensor`. stringToNumber' op'options string_tensor | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs string_tensor] return (opDef "StringToNumber" & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "string_tensor" type: DT_STRING } output_arg { name: "output" description: "A Tensor of the same shape as the input `string_tensor`." type_attr: "out_type" } attr { name: "out_type" type: "type" default_value { type: DT_FLOAT } description: "The numeric type to interpret each string in `string_tensor` as." allowed_values { list { type: DT_FLOAT type: DT_INT32 } } } -} -- | Returns x - y element-wise. -- -- *NOTE*: `Sub` supports broadcasting. More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) sub :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ sub = sub' id sub' :: forall v'1 v'2 t . 
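-- For instance, broadcasting a vector against a matrix with `sub`
-- (a sketch, reusing the qualified imports from the `stridedSlice`
-- sketch above):
--
-- > -- [[1,2],[3,4]] - [1,1]  ==>  [[0,1],[2,3]]
-- > z = Ops.sub (TF.constant (Shape [2, 2]) [1, 2, 3, 4 :: Float])
-- >             (TF.constant (Shape [2]) [1, 1])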
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ sub' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "Sub" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Return substrings from a `Tensor` of strings. -- -- For each string in the input `Tensor`, creates a substring starting at index -- `pos` with a total length of `len`. -- -- If `len` defines a substring that would extend beyond the length of the input -- string, then as many characters as possible are used. -- -- If `pos` is negative or specifies a character index larger than any of the input -- strings, then an `InvalidArgumentError` is thrown. -- -- `pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on -- Op creation. -- -- *NOTE*: `Substr` supports broadcasting up to two dimensions. More about -- broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) -- -- --- -- -- Examples -- -- Using scalar `pos` and `len`: -- -- ``` -- input = [b'Hello', b'World'] -- position = 1 -- length = 3 -- -- output = [b'ell', b'orl'] -- ``` -- -- Using `pos` and `len` with same shape as `input`: -- -- ``` -- input = [[b'ten', b'eleven', b'twelve'], -- [b'thirteen', b'fourteen', b'fifteen'], -- [b'sixteen', b'seventeen', b'eighteen']] -- position = [[1, 2, 3], -- [1, 2, 3], -- [1, 2, 3]] -- length = [[2, 3, 4], -- [4, 3, 2], -- [5, 5, 5]] -- -- output = [[b'en', b'eve', b'lve'], -- [b'hirt', b'urt', b'te'], -- [b'ixtee', b'vente', b'hteen']] -- ``` -- -- Broadcasting `pos` and `len` onto `input`: -- -- ``` -- input = [[b'ten', b'eleven', b'twelve'], -- [b'thirteen', b'fourteen', b'fifteen'], -- [b'sixteen', b'seventeen', b'eighteen'], -- [b'nineteen', b'twenty', b'twentyone']] -- position = [1, 2, 3] -- length = [1, 2, 3] -- -- output = [[b'e', b'ev', b'lve'], -- [b'h', b'ur', b'tee'], -- [b'i', b've', b'hte'], -- [b'i', b'en', b'nty']] -- ``` -- -- Broadcasting `input` onto `pos` and `len`: -- -- ``` -- input = b'thirteen' -- position = [1, 5, 7] -- length = [3, 2, 1] -- -- output = [b'hir', b'ee', b'n'] -- ``` substr :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Tensor v'1 Data.ByteString.ByteString -- ^ __input__: Tensor of strings -> Tensor v'2 t -- ^ __pos__: Scalar defining the position of first character in each substring -> Tensor v'3 t -- ^ __len__: Scalar defining the number of characters to include in each substring -> Tensor Build Data.ByteString.ByteString -- ^ __output__: Tensor of substrings substr = substr' id substr' :: forall v'1 v'2 v'3 t . 
(OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__: Tensor of strings -> Tensor v'2 t -- ^ __pos__: Scalar defining the position of first character in each substring -> Tensor v'3 t -- ^ __len__: Scalar defining the number of characters to include in each substring -> Tensor Build Data.ByteString.ByteString -- ^ __output__: Tensor of substrings substr' op'options input pos len | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs pos, buildInputs len] return (opDef "Substr" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "Tensor of strings" type: DT_STRING } input_arg { name: "pos" description: "Scalar defining the position of first character in each substring" type_attr: "T" } input_arg { name: "len" description: "Scalar defining the number of characters to include in each substring" type_attr: "T" } output_arg { name: "output" description: "Tensor of substrings" type: DT_STRING } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Computes the sum of elements across dimensions of a tensor. -- -- Reduces `input` along the dimensions given in `reduction_indices`. Unless -- `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in -- `reduction_indices`. If `keep_dims` is true, the reduced dimensions are -- retained with length 1. sum :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __input__: The tensor to reduce. -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce. -> Tensor Build t -- ^ __output__: The reduced tensor. sum = sum' id sum' :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __input__: The tensor to reduce. -> Tensor v'2 tidx -- ^ __reduction_indices__: The dimensions to reduce. -> Tensor Build t -- ^ __output__: The reduced tensor. sum' op'options input reduction_indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs reduction_indices] return (opDef "Sum" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "The tensor to reduce." type_attr: "T" } input_arg { name: "reduction_indices" description: "The dimensions to reduce." type_attr: "Tidx" } output_arg { name: "output" description: "The reduced tensor." type_attr: "T" } attr { name: "keep_dims" type: "bool" default_value { b: false } description: "If true, retain reduced dimensions with length 1." 
} attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Computes the singular value decompositions of one or more matrices. -- -- Computes the SVD of each inner matrix in `input` such that -- `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])` -- -- ```prettyprint -- # a is a tensor containing a batch of matrices. -- # s is a tensor of singular values for each matrix. -- # u is the tensor containing of left singular vectors for each matrix. -- # v is the tensor containing of right singular vectors for each matrix. -- s, u, v = svd(a) -- s, _, _ = svd(a, compute_uv=False) -- ``` svd :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Double, Float] t) => Tensor v'1 t -- ^ __input__: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions -- form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`. -> (Tensor Build t, Tensor Build t, Tensor Build t) -- ^ (__s__, __u__, __v__) -- -- * __s__: Singular values. Shape is `[..., P]`. -- -- * __u__: Left singular vectors. If `full_matrices` is `False` then shape is -- `[..., M, P]`; if `full_matrices` is `True` then shape is -- `[..., M, M]`. Undefined if `compute_uv` is `False`. -- -- * __v__: Left singular vectors. If `full_matrices` is `False` then shape is -- `[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`. -- Undefined if `compute_uv` is false. svd = svd' id svd' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions -- form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`. -> (Tensor Build t, Tensor Build t, Tensor Build t) -- ^ (__s__, __u__, __v__) -- -- * __s__: Singular values. Shape is `[..., P]`. -- -- * __u__: Left singular vectors. If `full_matrices` is `False` then shape is -- `[..., M, P]`; if `full_matrices` is `True` then shape is -- `[..., M, M]`. Undefined if `compute_uv` is `False`. -- -- * __v__: Left singular vectors. If `full_matrices` is `False` then shape is -- `[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`. -- Undefined if `compute_uv` is false. svd' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Svd" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "A tensor of shape `[..., M, N]` whose inner-most 2 dimensions\nform matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`." type_attr: "T" } output_arg { name: "s" description: "Singular values. Shape is `[..., P]`." type_attr: "T" } output_arg { name: "u" description: "Left singular vectors. If `full_matrices` is `False` then shape is\n`[..., M, P]`; if `full_matrices` is `True` then shape is\n`[..., M, M]`. Undefined if `compute_uv` is `False`." type_attr: "T" } output_arg { name: "v" description: "Left singular vectors. If `full_matrices` is `False` then shape is\n`[..., N, P]`. 
If `full_matrices` is `True` then shape is `[..., N, N]`.\nUndefined if `compute_uv` is false." type_attr: "T" } attr { name: "compute_uv" type: "bool" default_value { b: true } description: "If true, left and right singular vectors will be\ncomputed and returned in `u` and `v`, respectively.\nIf false, `u` and `v` are not set and should never referenced." } attr { name: "full_matrices" type: "bool" default_value { b: false } description: "If true, compute full-sized `u` and `v`. If false\n(the default), compute only the leading `P` singular vectors.\nIgnored if `compute_uv` is `False`." } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Forwards `data` to the output port determined by `pred`. -- -- If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, -- the data goes to `output_false`. -- -- See also `RefSwitch` and `Merge`. switch :: forall v'1 v'2 t . (TensorType t) => Tensor v'1 t -- ^ __data__: The tensor to be forwarded to the appropriate output. -> Tensor v'2 Bool -- ^ __pred__: A scalar that specifies which output port will receive data. -> (Tensor Build t, Tensor Build t) -- ^ (__output_false__, __output_true__) -- -- * __output_false__: If `pred` is false, data will be forwarded to this output. -- -- * __output_true__: If `pred` is true, data will be forwarded to this output. switch = switch' id switch' :: forall v'1 v'2 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __data__: The tensor to be forwarded to the appropriate output. -> Tensor v'2 Bool -- ^ __pred__: A scalar that specifies which output port will receive data. -> (Tensor Build t, Tensor Build t) -- ^ (__output_false__, __output_true__) -- -- * __output_false__: If `pred` is false, data will be forwarded to this output. -- -- * __output_true__: If `pred` is true, data will be forwarded to this output. switch' op'options data' pred | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs pred] return (opDef "Switch" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" description: "The tensor to be forwarded to the appropriate output." type_attr: "T" } input_arg { name: "pred" description: "A scalar that specifies which output port will receive data." type: DT_BOOL } output_arg { name: "output_false" description: "If `pred` is false, data will be forwarded to this output." type_attr: "T" } output_arg { name: "output_true" description: "If `pred` is true, data will be forwarded to this output." type_attr: "T" } attr { name: "T" type: "type" } -} -- | A Reader that outputs the records from a TensorFlow Records file. tFRecordReader :: forall m' . (MonadBuild m') => m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader. tFRecordReader = tFRecordReader' id tFRecordReader' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader. tFRecordReader' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "TFRecordReader" & op'options & opInputs .~ op'inputs) {- output_arg { name: "reader_handle" description: "The handle to reference the Reader." 
type: DT_STRING is_ref: true } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead." } attr { name: "compression_type" type: "string" default_value { s: "" } } -} -- | A Reader that outputs the records from a TensorFlow Records file. tFRecordReaderV2 :: forall m' . (MonadBuild m') => m' (Tensor Value ResourceHandle) -- ^ __reader_handle__: The handle to reference the Reader. tFRecordReaderV2 = tFRecordReaderV2' id tFRecordReaderV2' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Value ResourceHandle) -- ^ __reader_handle__: The handle to reference the Reader. tFRecordReaderV2' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "TFRecordReaderV2" & op'options & opInputs .~ op'inputs) {- output_arg { name: "reader_handle" description: "The handle to reference the Reader." type: DT_RESOURCE } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead." } attr { name: "compression_type" type: "string" default_value { s: "" } } -} -- | Read `SparseTensors` from a `SparseTensorsMap` and concatenate them. -- -- The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where -- `N` is the minibatch size and the rows correspond to the output handles of -- `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`. The ranks of the -- original `SparseTensor` objects that went into the given input ops must all -- match. When the final `SparseTensor` is created, it has rank one -- higher than the ranks of the incoming `SparseTensor` objects -- (they have been concatenated along a new row dimension on the left). -- -- The output `SparseTensor` object's shape values for all dimensions but the -- first are the max across the input `SparseTensor` objects' shape values -- for the corresponding dimensions. Its first shape value is `N`, the minibatch -- size. -- -- The input `SparseTensor` objects' indices are assumed ordered in -- standard lexicographic order. If this is not the case, after this -- step run `SparseReorder` to restore index ordering. -- -- For example, if the handles represent an input, which is a `[2, 3]` matrix -- representing two original `SparseTensor` objects: -- -- ``` -- index = [ 0] -- [10] -- [20] -- values = [1, 2, 3] -- shape = [50] -- ``` -- -- and -- -- ``` -- index = [ 2] -- [10] -- values = [4, 5] -- shape = [30] -- ``` -- -- then the final `SparseTensor` will be: -- -- ``` -- index = [0 0] -- [0 10] -- [0 20] -- [1 2] -- [1 10] -- values = [1, 2, 3, 4, 5] -- shape = [2 50] -- ``` takeManySparseFromTensorsMap :: forall v'1 dtype m' . (MonadBuild m', TensorType dtype) => Tensor v'1 Data.Int.Int64 -- ^ __sparse_handles__: 1-D, The `N` serialized `SparseTensor` objects. -- Shape: `[N]`. 
-> m' ((Tensor Value Data.Int.Int64, Tensor Value dtype, Tensor Value Data.Int.Int64)) -- ^ (__sparse_indices__, __sparse_values__, __sparse_shape__) -- -- * __sparse_indices__: 2-D. The `indices` of the minibatch `SparseTensor`. -- -- * __sparse_values__: 1-D. The `values` of the minibatch `SparseTensor`. -- -- * __sparse_shape__: 1-D. The `shape` of the minibatch `SparseTensor`. takeManySparseFromTensorsMap = takeManySparseFromTensorsMap' id takeManySparseFromTensorsMap' :: forall v'1 dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __sparse_handles__: 1-D, The `N` serialized `SparseTensor` objects. -- Shape: `[N]`. -> m' ((Tensor Value Data.Int.Int64, Tensor Value dtype, Tensor Value Data.Int.Int64)) -- ^ (__sparse_indices__, __sparse_values__, __sparse_shape__) -- -- * __sparse_indices__: 2-D. The `indices` of the minibatch `SparseTensor`. -- -- * __sparse_values__: 1-D. The `values` of the minibatch `SparseTensor`. -- -- * __sparse_shape__: 1-D. The `shape` of the minibatch `SparseTensor`. takeManySparseFromTensorsMap' op'options sparse_handles | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sparse_handles] buildOp [] (opDef "TakeManySparseFromTensorsMap" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "sparse_handles" description: "1-D, The `N` serialized `SparseTensor` objects.\nShape: `[N]`." type: DT_INT64 } output_arg { name: "sparse_indices" description: "2-D. The `indices` of the minibatch `SparseTensor`." type: DT_INT64 } output_arg { name: "sparse_values" description: "1-D. The `values` of the minibatch `SparseTensor`." type_attr: "dtype" } output_arg { name: "sparse_shape" description: "1-D. The `shape` of the minibatch `SparseTensor`." type: DT_INT64 } attr { name: "dtype" type: "type" description: "The `dtype` of the `SparseTensor` objects stored in the\n`SparseTensorsMap`." } attr { name: "container" type: "string" default_value { s: "" } description: "The container name for the `SparseTensorsMap` read by this op." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "The shared name for the `SparseTensorsMap` read by this op.\nIt should not be blank; rather the `shared_name` or unique Operation name\nof the Op that created the original `SparseTensorsMap` should be used." } -} -- | Computes tan of x element-wise. tan :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ tan = tan' id tan' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ tan' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Tan" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Computes hyperbolic tangent of `x` element-wise. tanh :: forall v'1 t . 
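-- A quick elementwise example for `tan` (a sketch, reusing the imports
-- from the `stridedSlice` sketch above):
--
-- > tans :: IO (V.Vector Float)
-- > tans = TF.runSession $ TF.run $
-- >   Ops.tan (TF.constant (Shape [2]) [0, pi / 4 :: Float])
-- > -- ==> approximately [0.0, 1.0]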
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ tanh = tanh' id tanh' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ tanh' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Tanh" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Computes the gradient for the tanh of `x` wrt its input. -- -- Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy` -- is the corresponding input gradient. tanhGrad :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ tanhGrad = tanhGrad' id tanhGrad' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ tanhGrad' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "TanhGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Returns a tensor that may be mutated, but only persists within a single step. -- -- This is an experimental op for internal use only and it is possible to use this -- op in unsafe ways. DO NOT USE unless you fully understand the risks. -- -- It is the caller's responsibility to ensure that 'ref' is eventually passed to a -- matching 'DestroyTemporaryVariable' op after all other uses have completed. -- -- Outputs a ref to the tensor state so it may be read or modified. -- -- E.g. -- var = state_ops._temporary_variable([1, 2], types.float_) -- var_name = var.op.name -- var = state_ops.assign(var, [[4.0, 5.0]]) -- var = state_ops.assign_add(var, [[6.0, 7.0]]) -- final = state_ops._destroy_temporary_variable(var, var_name=var_name) temporaryVariable :: forall dtype m' . (MonadBuild m', TensorType dtype) => Shape -- ^ __shape__: The shape of the variable tensor. -> m' (Tensor Ref dtype) -- ^ __ref__: A reference to the variable tensor. temporaryVariable = temporaryVariable' id temporaryVariable' :: forall dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Shape -- ^ __shape__: The shape of the variable tensor. -> m' (Tensor Ref dtype) -- ^ __ref__: A reference to the variable tensor. 
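-- A lifecycle sketch mirroring the pseudo-code above (assumes `assign`
-- and `assignAdd` from this module plus `constant` from TensorFlow.Ops;
-- the matching `destroyTemporaryVariable` step is elided, since its
-- `var_name` must name the generated 'TemporaryVariable' op):
--
-- > do var   <- Ops.temporaryVariable (Shape [1, 2])
-- >    var'  <- Ops.assign var (TF.constant (Shape [1, 2]) [4, 5 :: Float])
-- >    var'' <- Ops.assignAdd var' (TF.constant (Shape [1, 2]) [6, 7])
-- >    ...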
temporaryVariable' op'options shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "TemporaryVariable" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- output_arg { name: "ref" description: "A reference to the variable tensor." type_attr: "dtype" is_ref: true } attr { name: "shape" type: "shape" description: "The shape of the variable tensor." } attr { name: "dtype" type: "type" description: "The type of elements in the variable tensor." } attr { name: "var_name" type: "string" default_value { s: "" } description: "Overrides the name used for the temporary variable resource. Default\nvalue is the name of the \'TemporaryVariable\' op (which is guaranteed unique)." } -} -- | tensorArray :: forall v'1 m' . (MonadBuild m') => DataType -- ^ __dtype__ -> Tensor v'1 Data.Int.Int32 -- ^ __size__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__ tensorArray = tensorArray' id tensorArray' :: forall v'1 m' . (MonadBuild m') => OpParams -> DataType -- ^ __dtype__ -> Tensor v'1 Data.Int.Int32 -- ^ __size__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__ tensorArray' op'options dtype size | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs size] buildOp [] (opDef "TensorArray" & opAttr "dtype" .~ dtype & op'options & opInputs .~ op'inputs) {- input_arg { name: "size" type: DT_INT32 } output_arg { name: "handle" type: DT_STRING is_ref: true } attr { name: "dtype" type: "type" } attr { name: "dynamic_size" type: "bool" default_value { b: false } } attr { name: "clear_after_read" type: "bool" default_value { b: true } } attr { name: "tensor_array_name" type: "string" default_value { s: "" } } attr { name: "element_shape" type: "shape" default_value { shape { unknown_rank: true } } } -} -- | tensorArrayClose :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> m' (ControlNode) tensorArrayClose = tensorArrayClose' id tensorArrayClose' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> m' (ControlNode) tensorArrayClose' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "TensorArrayClose" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } -} -- | Deprecated. Use TensorArrayCloseV3 tensorArrayCloseV2 :: forall v'1 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> m' (ControlNode) tensorArrayCloseV2 = tensorArrayCloseV2' id tensorArrayCloseV2' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> m' (ControlNode) tensorArrayCloseV2' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "TensorArrayCloseV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING } -} -- | Delete the TensorArray from its resource container. This enables -- -- the user to close and release the resource in the middle of a step/run. tensorArrayCloseV3 :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray (output of TensorArray or TensorArrayGrad). -> m' (ControlNode) tensorArrayCloseV3 = tensorArrayCloseV3' id tensorArrayCloseV3' :: forall v'1 m' . 
(MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray (output of TensorArray or TensorArrayGrad). -> m' (ControlNode) tensorArrayCloseV3' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "TensorArrayCloseV3" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a TensorArray (output of TensorArray or TensorArrayGrad)." type: DT_RESOURCE } -} -- | tensorArrayConcat :: forall v'2 dtype m' . (MonadBuild m', TensorType dtype) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' ((Tensor Value dtype, Tensor Value Data.Int.Int64)) -- ^ (__value__, __lengths__) -- -- * __value__ -- -- * __lengths__ tensorArrayConcat = tensorArrayConcat' id tensorArrayConcat' :: forall v'2 dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' ((Tensor Value dtype, Tensor Value Data.Int.Int64)) -- ^ (__value__, __lengths__) -- -- * __value__ -- -- * __lengths__ tensorArrayConcat' op'options handle flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs flow_in] buildOp [] (opDef "TensorArrayConcat" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "value" type_attr: "dtype" } output_arg { name: "lengths" type: DT_INT64 } attr { name: "dtype" type: "type" } attr { name: "element_shape_except0" type: "shape" default_value { shape { unknown_rank: true } } } -} -- | Deprecated. Use TensorArrayConcatV3 tensorArrayConcatV2 :: forall v'1 v'2 dtype . (TensorType dtype) => Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> (Tensor Build dtype, Tensor Build Data.Int.Int64) -- ^ (__value__, __lengths__) -- -- * __value__ -- -- * __lengths__ tensorArrayConcatV2 = tensorArrayConcatV2' id tensorArrayConcatV2' :: forall v'1 v'2 dtype . (TensorType dtype) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> (Tensor Build dtype, Tensor Build Data.Int.Int64) -- ^ (__value__, __lengths__) -- -- * __value__ -- -- * __lengths__ tensorArrayConcatV2' op'options handle flow_in | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs flow_in] return (opDef "TensorArrayConcatV2" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "value" type_attr: "dtype" } output_arg { name: "lengths" type: DT_INT64 } attr { name: "dtype" type: "type" } attr { name: "element_shape_except0" type: "shape" default_value { shape { unknown_rank: true } } } -} -- | Concat the elements from the TensorArray into value `value`. -- -- Takes `T` elements of shapes -- -- ``` -- (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...) -- ``` -- -- and concatenates them into a Tensor of shape: -- -- ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)``` -- -- All elements must have the same shape (excepting the first dimension). tensorArrayConcatV3 :: forall v'1 v'2 dtype m' . 
(MonadBuild m', TensorType dtype) => Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray. -> Tensor v'2 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations. -> m' ((Tensor Value dtype, Tensor Value Data.Int.Int64)) -- ^ (__value__, __lengths__) -- -- * __value__: All of the elements in the TensorArray, concatenated along the first -- axis. -- -- * __lengths__: A vector of the row sizes of the original T elements in the -- value output. In the example above, this would be the values: -- `(n1, n2, ..., n(T-1))`. tensorArrayConcatV3 = tensorArrayConcatV3' id tensorArrayConcatV3' :: forall v'1 v'2 dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray. -> Tensor v'2 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations. -> m' ((Tensor Value dtype, Tensor Value Data.Int.Int64)) -- ^ (__value__, __lengths__) -- -- * __value__: All of the elements in the TensorArray, concatenated along the first -- axis. -- -- * __lengths__: A vector of the row sizes of the original T elements in the -- value output. In the example above, this would be the values: -- `(n1, n2, ..., n(T-1))`. tensorArrayConcatV3' op'options handle flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs flow_in] buildOp [] (opDef "TensorArrayConcatV3" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a TensorArray." type: DT_RESOURCE } input_arg { name: "flow_in" description: "A float scalar that enforces proper chaining of operations." type: DT_FLOAT } output_arg { name: "value" description: "All of the elements in the TensorArray, concatenated along the first\naxis." type_attr: "dtype" } output_arg { name: "lengths" description: "A vector of the row sizes of the original T elements in the\nvalue output. In the example above, this would be the values:\n`(n1, n2, ..., n(T-1))`." type: DT_INT64 } attr { name: "dtype" type: "type" description: "The type of the elem that is returned." } attr { name: "element_shape_except0" type: "shape" default_value { shape { unknown_rank: true } } description: "The expected shape of an element, if known,\nexcluding the first dimension. Used to validate the shapes of\nTensorArray elements. If this shape is not fully specified, concatenating\nzero-size TensorArrays is an error." } -} -- | tensorArrayGather :: forall v'2 v'3 dtype m' . (MonadBuild m', TensorType dtype) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 Float -- ^ __flow_in__ -> m' (Tensor Value dtype) -- ^ __value__ tensorArrayGather = tensorArrayGather' id tensorArrayGather' :: forall v'2 v'3 dtype m' . 
(MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 Float -- ^ __flow_in__ -> m' (Tensor Value dtype) -- ^ __value__ tensorArrayGather' op'options handle indices flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs indices, buildInputs flow_in] buildOp [] (opDef "TensorArrayGather" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "indices" type: DT_INT32 } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" } attr { name: "element_shape" type: "shape" default_value { shape { unknown_rank: true } } } -} -- | Deprecated. Use TensorArrayGatherV3 tensorArrayGatherV2 :: forall v'1 v'2 v'3 dtype . (TensorType dtype) => Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 Float -- ^ __flow_in__ -> Tensor Build dtype -- ^ __value__ tensorArrayGatherV2 = tensorArrayGatherV2' id tensorArrayGatherV2' :: forall v'1 v'2 v'3 dtype . (TensorType dtype) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 Float -- ^ __flow_in__ -> Tensor Build dtype -- ^ __value__ tensorArrayGatherV2' op'options handle indices flow_in | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs indices, buildInputs flow_in] return (opDef "TensorArrayGatherV2" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING } input_arg { name: "indices" type: DT_INT32 } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" } attr { name: "element_shape" type: "shape" default_value { shape { unknown_rank: true } } } -} -- | Gather specific elements from the TensorArray into output `value`. -- -- All elements selected by `indices` must have the same shape. tensorArrayGatherV3 :: forall v'1 v'2 v'3 dtype m' . (MonadBuild m', TensorType dtype) => Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray. -> Tensor v'2 Data.Int.Int32 -- ^ __indices__: The locations in the TensorArray from which to read tensor elements. -> Tensor v'3 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations. -> m' (Tensor Value dtype) -- ^ __value__: All of the elements in the TensorArray, concatenated along a new -- axis (the new dimension 0). tensorArrayGatherV3 = tensorArrayGatherV3' id tensorArrayGatherV3' :: forall v'1 v'2 v'3 dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray. -> Tensor v'2 Data.Int.Int32 -- ^ __indices__: The locations in the TensorArray from which to read tensor elements. -> Tensor v'3 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations. -> m' (Tensor Value dtype) -- ^ __value__: All of the elements in the TensorArray, concatenated along a new -- axis (the new dimension 0). 
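-- A scatter/gather sketch showing how the `flow` value chains the
-- operations (assumes `tensorArrayV3` and `tensorArrayScatterV3` from
-- this module, with the analogous generated signatures; `DT_FLOAT` comes
-- with the `DataType` re-exported through TensorFlow.Types):
--
-- > do (h, flow) <- Ops.tensorArrayV3 DT_FLOAT (TF.scalar (2 :: Int32))
-- >    flow' <- Ops.tensorArrayScatterV3 h
-- >               (TF.constant (Shape [2]) [0, 1 :: Int32])
-- >               (TF.constant (Shape [2]) [10, 20 :: Float])
-- >               flow
-- >    gathered <- Ops.tensorArrayGatherV3 h (TF.constant (Shape [2]) [0, 1]) flow'
-- >    TF.run gathered :: TF.Session (V.Vector Float)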
tensorArrayGatherV3' op'options handle indices flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs indices, buildInputs flow_in] buildOp [] (opDef "TensorArrayGatherV3" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a TensorArray." type: DT_RESOURCE } input_arg { name: "indices" description: "The locations in the TensorArray from which to read tensor elements." type: DT_INT32 } input_arg { name: "flow_in" description: "A float scalar that enforces proper chaining of operations." type: DT_FLOAT } output_arg { name: "value" description: "All of the elements in the TensorArray, concatenated along a new\naxis (the new dimension 0)." type_attr: "dtype" } attr { name: "dtype" type: "type" description: "The type of the elem that is returned." } attr { name: "element_shape" type: "shape" default_value { shape { unknown_rank: true } } description: "The expected shape of an element, if known. Used to\nvalidate the shapes of TensorArray elements. If this shape is not\nfully specified, gathering zero-size TensorArrays is an error." } -} -- | tensorArrayGrad :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __grad_handle__ tensorArrayGrad = tensorArrayGrad' id tensorArrayGrad' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __grad_handle__ tensorArrayGrad' op'options handle flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs flow_in] buildOp [] (opDef "TensorArrayGrad" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "grad_handle" type: DT_STRING is_ref: true } attr { name: "source" type: "string" } -} -- | Deprecated. Use TensorArrayGradV3 tensorArrayGradV2 :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __grad_handle__ tensorArrayGradV2 = tensorArrayGradV2' id tensorArrayGradV2' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __grad_handle__ tensorArrayGradV2' op'options handle flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs flow_in] buildOp [] (opDef "TensorArrayGradV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "grad_handle" type: DT_STRING } attr { name: "source" type: "string" } -} -- | Creates a TensorArray for storing the gradients of values in the given handle. -- -- If the given TensorArray gradient already exists, returns a reference to it. -- -- Locks the size of the original TensorArray by disabling its dynamic size flag. -- -- **A note about the input flow_in:** -- -- The handle flow_in forces the execution of the gradient lookup to occur -- only after certain other operations have occurred. 
For example, when -- the forward TensorArray is dynamically sized, writes to this TensorArray -- may resize the object. The gradient TensorArray is statically sized based -- on the size of the forward TensorArray when this operation executes. -- Furthermore, the size of the forward TensorArray is frozen by this call. -- As a result, the flow is used to ensure that the call to generate the gradient -- TensorArray only happens after all writes are executed. -- -- In the case of dynamically sized TensorArrays, gradient computation should -- only be performed on read operations that have themselves been chained via -- flow to occur only after all writes have executed. That way the final size -- of the forward TensorArray is known when this operation is called. -- -- **A note about the source attribute:** -- -- TensorArray gradient calls use an accumulator TensorArray object. If -- multiple gradients are calculated and run in the same session, the multiple -- gradient nodes may accidentally flow through the same accumulator TensorArray. -- This double-counts and generally breaks the TensorArray gradient flow. -- -- The solution is to identify which gradient call this particular -- TensorArray gradient is being called in. This is performed by identifying -- a unique string (e.g. "gradients", "gradients_1", ...) from the input -- gradient Tensor's name. This string is used as a suffix when creating -- the TensorArray gradient object here (the attribute `source`). -- -- The attribute `source` is added as a suffix to the forward TensorArray's -- name when performing the creation / lookup, so that each separate gradient -- calculation gets its own TensorArray accumulator. tensorArrayGradV3 :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __handle__: The handle to the forward TensorArray. -> Tensor v'2 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations. -> m' ((Tensor Value ResourceHandle, Tensor Value Float)) -- ^ (__grad_handle__, __flow_out__) -- -- * __grad_handle__ -- -- * __flow_out__ tensorArrayGradV3 = tensorArrayGradV3' id tensorArrayGradV3' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__: The handle to the forward TensorArray. -> Tensor v'2 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations. -> m' ((Tensor Value ResourceHandle, Tensor Value Float)) -- ^ (__grad_handle__, __flow_out__) -- -- * __grad_handle__ -- -- * __flow_out__ tensorArrayGradV3' op'options handle flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs flow_in] buildOp [] (opDef "TensorArrayGradV3" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to the forward TensorArray." type: DT_RESOURCE } input_arg { name: "flow_in" description: "A float scalar that enforces proper chaining of operations." type: DT_FLOAT } output_arg { name: "grad_handle" type: DT_RESOURCE } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "source" type: "string" description: "The gradient source string, used to decide which gradient TensorArray\nto return." } -} -- | tensorArrayPack :: forall v'2 dtype m' . (MonadBuild m', TensorType dtype) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' (Tensor Value dtype) -- ^ __value__ tensorArrayPack = tensorArrayPack' id tensorArrayPack' :: forall v'2 dtype m' . 
(MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' (Tensor Value dtype) -- ^ __value__ tensorArrayPack' op'options handle flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs flow_in] buildOp [] (opDef "TensorArrayPack" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" } attr { name: "element_shape" type: "shape" default_value { shape { unknown_rank: true } } } -} -- | tensorArrayRead :: forall v'2 v'3 dtype m' . (MonadBuild m', TensorType dtype) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 Float -- ^ __flow_in__ -> m' (Tensor Value dtype) -- ^ __value__ tensorArrayRead = tensorArrayRead' id tensorArrayRead' :: forall v'2 v'3 dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 Float -- ^ __flow_in__ -> m' (Tensor Value dtype) -- ^ __value__ tensorArrayRead' op'options handle index flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs index, buildInputs flow_in] buildOp [] (opDef "TensorArrayRead" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "index" type: DT_INT32 } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" } -} -- | Deprecated. Use TensorArrayReadV3 tensorArrayReadV2 :: forall v'1 v'2 v'3 dtype . (TensorType dtype) => Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 Float -- ^ __flow_in__ -> Tensor Build dtype -- ^ __value__ tensorArrayReadV2 = tensorArrayReadV2' id tensorArrayReadV2' :: forall v'1 v'2 v'3 dtype . (TensorType dtype) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 Float -- ^ __flow_in__ -> Tensor Build dtype -- ^ __value__ tensorArrayReadV2' op'options handle index flow_in | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs index, buildInputs flow_in] return (opDef "TensorArrayReadV2" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING } input_arg { name: "index" type: DT_INT32 } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" } -} -- | Read an element from the TensorArray into output `value`. tensorArrayReadV3 :: forall v'1 v'2 v'3 dtype m' . (MonadBuild m', TensorType dtype) => Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray. -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations. -> m' (Tensor Value dtype) -- ^ __value__: The tensor that is read from the TensorArray. 
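{- Example (hand-written sketch, not part of the generated bindings):
   threading the flow scalar through a write and a subsequent read, so the
   read is guaranteed to observe the write. Assumes `TF.scalar` from
   TensorFlow.Ops and the DT_FLOAT constructor of the proto-generated
   DataType enum; neither is exported by this module.

     roundTrip :: MonadBuild m' => Tensor v'1 Float -> m' (Tensor Value Float)
     roundTrip x = do
       (handle, flow0) <- tensorArrayV3 DT_FLOAT (TF.scalar (1 :: Data.Int.Int32))
       flow1 <- tensorArrayWriteV3 handle (TF.scalar (0 :: Data.Int.Int32)) x flow0
       tensorArrayReadV3 handle (TF.scalar (0 :: Data.Int.Int32)) flow1
-}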
tensorArrayReadV3 = tensorArrayReadV3' id tensorArrayReadV3' :: forall v'1 v'2 v'3 dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray. -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations. -> m' (Tensor Value dtype) -- ^ __value__: The tensor that is read from the TensorArray. tensorArrayReadV3' op'options handle index flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs index, buildInputs flow_in] buildOp [] (opDef "TensorArrayReadV3" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a TensorArray." type: DT_RESOURCE } input_arg { name: "index" type: DT_INT32 } input_arg { name: "flow_in" description: "A float scalar that enforces proper chaining of operations." type: DT_FLOAT } output_arg { name: "value" description: "The tensor that is read from the TensorArray." type_attr: "dtype" } attr { name: "dtype" type: "type" description: "The type of the elem that is returned." } -} -- | tensorArrayScatter :: forall v'2 v'3 v'4 t m' . (MonadBuild m', TensorType t) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 t -- ^ __value__ -> Tensor v'4 Float -- ^ __flow_in__ -> m' (Tensor Value Float) -- ^ __flow_out__ tensorArrayScatter = tensorArrayScatter' id tensorArrayScatter' :: forall v'2 v'3 v'4 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 t -- ^ __value__ -> Tensor v'4 Float -- ^ __flow_in__ -> m' (Tensor Value Float) -- ^ __flow_out__ tensorArrayScatter' op'options handle indices value flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs indices, buildInputs value, buildInputs flow_in] buildOp [] (opDef "TensorArrayScatter" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "indices" type: DT_INT32 } input_arg { name: "value" type_attr: "T" } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "T" type: "type" } -} -- | Deprecated. Use TensorArrayScatterV3 tensorArrayScatterV2 :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 t -- ^ __value__ -> Tensor v'4 Float -- ^ __flow_in__ -> Tensor Build Float -- ^ __flow_out__ tensorArrayScatterV2 = tensorArrayScatterV2' id tensorArrayScatterV2' :: forall v'1 v'2 v'3 v'4 t . 
(TensorType t) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 t -- ^ __value__ -> Tensor v'4 Float -- ^ __flow_in__ -> Tensor Build Float -- ^ __flow_out__ tensorArrayScatterV2' op'options handle indices value flow_in | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs indices, buildInputs value, buildInputs flow_in] return (opDef "TensorArrayScatterV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING } input_arg { name: "indices" type: DT_INT32 } input_arg { name: "value" type_attr: "T" } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "T" type: "type" } -} -- | Scatter the data from the input value into specific TensorArray elements. -- -- `indices` must be a vector, its length must match the first dim of `value`. tensorArrayScatterV3 :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', TensorType t) => Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray. -> Tensor v'2 Data.Int.Int32 -- ^ __indices__: The locations at which to write the tensor elements. -> Tensor v'3 t -- ^ __value__: The concatenated tensor to write to the TensorArray. -> Tensor v'4 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations. -> m' (Tensor Value Float) -- ^ __flow_out__: A float scalar that enforces proper chaining of operations. tensorArrayScatterV3 = tensorArrayScatterV3' id tensorArrayScatterV3' :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray. -> Tensor v'2 Data.Int.Int32 -- ^ __indices__: The locations at which to write the tensor elements. -> Tensor v'3 t -- ^ __value__: The concatenated tensor to write to the TensorArray. -> Tensor v'4 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations. -> m' (Tensor Value Float) -- ^ __flow_out__: A float scalar that enforces proper chaining of operations. tensorArrayScatterV3' op'options handle indices value flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs indices, buildInputs value, buildInputs flow_in] buildOp [] (opDef "TensorArrayScatterV3" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a TensorArray." type: DT_RESOURCE } input_arg { name: "indices" description: "The locations at which to write the tensor elements." type: DT_INT32 } input_arg { name: "value" description: "The concatenated tensor to write to the TensorArray." type_attr: "T" } input_arg { name: "flow_in" description: "A float scalar that enforces proper chaining of operations." type: DT_FLOAT } output_arg { name: "flow_out" description: "A float scalar that enforces proper chaining of operations." type: DT_FLOAT } attr { name: "T" type: "type" } -} -- | tensorArraySize :: forall v'2 m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' (Tensor Value Data.Int.Int32) -- ^ __size__ tensorArraySize = tensorArraySize' id tensorArraySize' :: forall v'2 m' . 
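{- Example (hand-written sketch, not part of the generated bindings):
   scattering a value whose first dimension is 2 into elements 0 and 1; the
   length of `indices` must equal that first dimension. Assumes `TF.constant`
   from TensorFlow.Ops.

     scatterTwo :: MonadBuild m'
                => Tensor v'1 ResourceHandle -> Tensor v'2 Float -> Tensor v'3 Float
                -> m' (Tensor Value Float)
     scatterTwo handle value flowIn =
       tensorArrayScatterV3 handle
                            (TF.constant (Shape [2]) [0, 1 :: Data.Int.Int32])
                            value flowIn
-}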
(MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' (Tensor Value Data.Int.Int32) -- ^ __size__ tensorArraySize' op'options handle flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs flow_in] buildOp [] (opDef "TensorArraySize" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "size" type: DT_INT32 } -} -- | Deprecated. Use TensorArraySizeV3 tensorArraySizeV2 :: Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> Tensor Build Data.Int.Int32 -- ^ __size__ tensorArraySizeV2 = tensorArraySizeV2' id tensorArraySizeV2' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> Tensor Build Data.Int.Int32 -- ^ __size__ tensorArraySizeV2' op'options handle flow_in | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs flow_in] return (opDef "TensorArraySizeV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "size" type: DT_INT32 } -} -- | Get the current size of the TensorArray. tensorArraySizeV3 :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray (output of TensorArray or TensorArrayGrad). -> Tensor v'2 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations. -> m' (Tensor Value Data.Int.Int32) -- ^ __size__: The current size of the TensorArray. tensorArraySizeV3 = tensorArraySizeV3' id tensorArraySizeV3' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray (output of TensorArray or TensorArrayGrad). -> Tensor v'2 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations. -> m' (Tensor Value Data.Int.Int32) -- ^ __size__: The current size of the TensorArray. tensorArraySizeV3' op'options handle flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs flow_in] buildOp [] (opDef "TensorArraySizeV3" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a TensorArray (output of TensorArray or TensorArrayGrad)." type: DT_RESOURCE } input_arg { name: "flow_in" description: "A float scalar that enforces proper chaining of operations." type: DT_FLOAT } output_arg { name: "size" description: "The current size of the TensorArray." type: DT_INT32 } -} -- | tensorArraySplit :: forall v'2 v'3 v'4 t m' . (MonadBuild m', TensorType t) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 t -- ^ __value__ -> Tensor v'3 Data.Int.Int64 -- ^ __lengths__ -> Tensor v'4 Float -- ^ __flow_in__ -> m' (Tensor Value Float) -- ^ __flow_out__ tensorArraySplit = tensorArraySplit' id tensorArraySplit' :: forall v'2 v'3 v'4 t m' . 
(MonadBuild m', TensorType t) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 t -- ^ __value__ -> Tensor v'3 Data.Int.Int64 -- ^ __lengths__ -> Tensor v'4 Float -- ^ __flow_in__ -> m' (Tensor Value Float) -- ^ __flow_out__ tensorArraySplit' op'options handle value lengths flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs value, buildInputs lengths, buildInputs flow_in] buildOp [] (opDef "TensorArraySplit" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "value" type_attr: "T" } input_arg { name: "lengths" type: DT_INT64 } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "T" type: "type" } -} -- | Deprecated. Use TensorArraySplitV3 tensorArraySplitV2 :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 t -- ^ __value__ -> Tensor v'3 Data.Int.Int64 -- ^ __lengths__ -> Tensor v'4 Float -- ^ __flow_in__ -> Tensor Build Float -- ^ __flow_out__ tensorArraySplitV2 = tensorArraySplitV2' id tensorArraySplitV2' :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 t -- ^ __value__ -> Tensor v'3 Data.Int.Int64 -- ^ __lengths__ -> Tensor v'4 Float -- ^ __flow_in__ -> Tensor Build Float -- ^ __flow_out__ tensorArraySplitV2' op'options handle value lengths flow_in | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs value, buildInputs lengths, buildInputs flow_in] return (opDef "TensorArraySplitV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING } input_arg { name: "value" type_attr: "T" } input_arg { name: "lengths" type: DT_INT64 } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "T" type: "type" } -} -- | Split the data from the input value into TensorArray elements. -- -- Assuming that `lengths` takes on values -- -- ```(n0, n1, ..., n(T-1))``` -- -- and that `value` has shape -- -- ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```, -- -- this splits values into a TensorArray with T tensors. -- -- TensorArray index t will be the subtensor of values with starting position -- -- ```(n0 + n1 + ... + n(t-1), 0, 0, ...)``` -- -- and having size -- -- ```nt x d0 x d1 x ...``` tensorArraySplitV3 :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', TensorType t) => Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray. -> Tensor v'2 t -- ^ __value__: The concatenated tensor to write to the TensorArray. -> Tensor v'3 Data.Int.Int64 -- ^ __lengths__: The vector of lengths, how to split the rows of value into the -- TensorArray. -> Tensor v'4 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations. -> m' (Tensor Value Float) -- ^ __flow_out__: A float scalar that enforces proper chaining of operations. tensorArraySplitV3 = tensorArraySplitV3' id tensorArraySplitV3' :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray. -> Tensor v'2 t -- ^ __value__: The concatenated tensor to write to the TensorArray. 
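{- Example (hand-written sketch, not part of the generated bindings): with
   `lengths` = [2, 4], a 6-row `value` is split so that element 0 holds rows
   0..1 and element 1 holds rows 2..5, per the subtensor rule above. Assumes
   `TF.constant` from TensorFlow.Ops.

     splitRows :: MonadBuild m'
               => Tensor v'1 ResourceHandle -> Tensor v'2 Float -> Tensor v'3 Float
               -> m' (Tensor Value Float)
     splitRows handle value flowIn =
       tensorArraySplitV3 handle value
                          (TF.constant (Shape [2]) [2, 4 :: Data.Int.Int64])
                          flowIn
-}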
-> Tensor v'3 Data.Int.Int64 -- ^ __lengths__: The vector of lengths, how to split the rows of value into the -- TensorArray. -> Tensor v'4 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations. -> m' (Tensor Value Float) -- ^ __flow_out__: A float scalar that enforces proper chaining of operations. tensorArraySplitV3' op'options handle value lengths flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs value, buildInputs lengths, buildInputs flow_in] buildOp [] (opDef "TensorArraySplitV3" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a TensorArray." type: DT_RESOURCE } input_arg { name: "value" description: "The concatenated tensor to write to the TensorArray." type_attr: "T" } input_arg { name: "lengths" description: "The vector of lengths, how to split the rows of value into the\nTensorArray." type: DT_INT64 } input_arg { name: "flow_in" description: "A float scalar that enforces proper chaining of operations." type: DT_FLOAT } output_arg { name: "flow_out" description: "A float scalar that enforces proper chaining of operations." type: DT_FLOAT } attr { name: "T" type: "type" } -} -- | tensorArrayUnpack :: forall v'2 v'3 t m' . (MonadBuild m', TensorType t) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 t -- ^ __value__ -> Tensor v'3 Float -- ^ __flow_in__ -> m' (Tensor Value Float) -- ^ __flow_out__ tensorArrayUnpack = tensorArrayUnpack' id tensorArrayUnpack' :: forall v'2 v'3 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 t -- ^ __value__ -> Tensor v'3 Float -- ^ __flow_in__ -> m' (Tensor Value Float) -- ^ __flow_out__ tensorArrayUnpack' op'options handle value flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs value, buildInputs flow_in] buildOp [] (opDef "TensorArrayUnpack" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "value" type_attr: "T" } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "T" type: "type" } -} -- | Deprecated. Use TensorArrayV3 tensorArrayV2 :: forall v'1 m' . (MonadBuild m') => DataType -- ^ __dtype__ -> Tensor v'1 Data.Int.Int32 -- ^ __size__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __handle__ tensorArrayV2 = tensorArrayV2' id tensorArrayV2' :: forall v'1 m' . 
(MonadBuild m') => OpParams -> DataType -- ^ __dtype__ -> Tensor v'1 Data.Int.Int32 -- ^ __size__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __handle__ tensorArrayV2' op'options dtype size | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs size] buildOp [] (opDef "TensorArrayV2" & opAttr "dtype" .~ dtype & op'options & opInputs .~ op'inputs) {- input_arg { name: "size" type: DT_INT32 } output_arg { name: "handle" type: DT_STRING } attr { name: "dtype" type: "type" } attr { name: "element_shape" type: "shape" default_value { shape { unknown_rank: true } } } attr { name: "dynamic_size" type: "bool" default_value { b: false } } attr { name: "clear_after_read" type: "bool" default_value { b: true } } attr { name: "tensor_array_name" type: "string" default_value { s: "" } } -} -- | An array of Tensors of given size, with data written via Write and read -- -- via Read or Pack. tensorArrayV3 :: forall v'1 m' . (MonadBuild m') => DataType -- ^ __dtype__: The type of the elements on the tensor_array. -> Tensor v'1 Data.Int.Int32 -- ^ __size__: The size of the array. -> m' ((Tensor Value ResourceHandle, Tensor Value Float)) -- ^ (__handle__, __flow__) -- -- * __handle__: The handle to the TensorArray. -- -- * __flow__: A scalar used to control gradient flow. tensorArrayV3 = tensorArrayV3' id tensorArrayV3' :: forall v'1 m' . (MonadBuild m') => OpParams -> DataType -- ^ __dtype__: The type of the elements on the tensor_array. -> Tensor v'1 Data.Int.Int32 -- ^ __size__: The size of the array. -> m' ((Tensor Value ResourceHandle, Tensor Value Float)) -- ^ (__handle__, __flow__) -- -- * __handle__: The handle to the TensorArray. -- -- * __flow__: A scalar used to control gradient flow. tensorArrayV3' op'options dtype size | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs size] buildOp [] (opDef "TensorArrayV3" & opAttr "dtype" .~ dtype & op'options & opInputs .~ op'inputs) {- input_arg { name: "size" description: "The size of the array." type: DT_INT32 } output_arg { name: "handle" description: "The handle to the TensorArray." type: DT_RESOURCE } output_arg { name: "flow" description: "A scalar used to control gradient flow." type: DT_FLOAT } attr { name: "dtype" type: "type" description: "The type of the elements on the tensor_array." } attr { name: "element_shape" type: "shape" default_value { shape { unknown_rank: true } } description: "The expected shape of an element, if known. Used to\nvalidate the shapes of TensorArray elements. If this shape is not\nfully specified, gathering zero-size TensorArrays is an error." } attr { name: "dynamic_size" type: "bool" default_value { b: false } description: "A boolean that determines whether writes to the TensorArray\nare allowed to grow the size. By default, this is not allowed." } attr { name: "clear_after_read" type: "bool" default_value { b: true } description: "If true (default), Tensors in the TensorArray are cleared\nafter being read. This disables multiple read semantics but allows early\nrelease of memory." } attr { name: "tensor_array_name" type: "string" default_value { s: "" } description: "Overrides the name used for the temporary tensor_array\nresource. Default value is the name of the \'TensorArray\' op (which\nis guaranteed unique)." } -} -- | tensorArrayWrite :: forall v'2 v'3 v'4 t m' . 
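{- Example (hand-written sketch, not part of the generated bindings):
   creating a dynamically sized TensorArray by setting the `dynamic_size`
   attribute listed above through OpParams. Assumes `TF.scalar` from
   TensorFlow.Ops and the DT_FLOAT constructor of the proto-generated
   DataType enum.

     dynArray :: MonadBuild m' => m' (Tensor Value ResourceHandle, Tensor Value Float)
     dynArray = tensorArrayV3' (opAttr "dynamic_size" .~ True)
                               DT_FLOAT (TF.scalar (0 :: Data.Int.Int32))
-}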
(MonadBuild m', TensorType t) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 t -- ^ __value__ -> Tensor v'4 Float -- ^ __flow_in__ -> m' (Tensor Value Float) -- ^ __flow_out__ tensorArrayWrite = tensorArrayWrite' id tensorArrayWrite' :: forall v'2 v'3 v'4 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 t -- ^ __value__ -> Tensor v'4 Float -- ^ __flow_in__ -> m' (Tensor Value Float) -- ^ __flow_out__ tensorArrayWrite' op'options handle index value flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs index, buildInputs value, buildInputs flow_in] buildOp [] (opDef "TensorArrayWrite" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "index" type: DT_INT32 } input_arg { name: "value" type_attr: "T" } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "T" type: "type" } -} -- | Deprecated. Use TensorArrayWriteV3 tensorArrayWriteV2 :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 t -- ^ __value__ -> Tensor v'4 Float -- ^ __flow_in__ -> Tensor Build Float -- ^ __flow_out__ tensorArrayWriteV2 = tensorArrayWriteV2' id tensorArrayWriteV2' :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 t -- ^ __value__ -> Tensor v'4 Float -- ^ __flow_in__ -> Tensor Build Float -- ^ __flow_out__ tensorArrayWriteV2' op'options handle index value flow_in | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs index, buildInputs value, buildInputs flow_in] return (opDef "TensorArrayWriteV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING } input_arg { name: "index" type: DT_INT32 } input_arg { name: "value" type_attr: "T" } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "T" type: "type" } -} -- | Push an element onto the tensor_array. tensorArrayWriteV3 :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', TensorType t) => Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray. -> Tensor v'2 Data.Int.Int32 -- ^ __index__: The position to write to inside the TensorArray. -> Tensor v'3 t -- ^ __value__: The tensor to write to the TensorArray. -> Tensor v'4 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations. -> m' (Tensor Value Float) -- ^ __flow_out__: A float scalar that enforces proper chaining of operations. tensorArrayWriteV3 = tensorArrayWriteV3' id tensorArrayWriteV3' :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__: The handle to a TensorArray. -> Tensor v'2 Data.Int.Int32 -- ^ __index__: The position to write to inside the TensorArray. -> Tensor v'3 t -- ^ __value__: The tensor to write to the TensorArray. -> Tensor v'4 Float -- ^ __flow_in__: A float scalar that enforces proper chaining of operations.
-> m' (Tensor Value Float) -- ^ __flow_out__: A float scalar that enforces proper chaining of operations. tensorArrayWriteV3' op'options handle index value flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs index, buildInputs value, buildInputs flow_in] buildOp [] (opDef "TensorArrayWriteV3" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" description: "The handle to a TensorArray." type: DT_RESOURCE } input_arg { name: "index" description: "The position to write to inside the TensorArray." type: DT_INT32 } input_arg { name: "value" description: "The tensor to write to the TensorArray." type_attr: "T" } input_arg { name: "flow_in" description: "A float scalar that enforces proper chaining of operations." type: DT_FLOAT } output_arg { name: "flow_out" description: "A float scalar that enforces proper chaining of operations." type: DT_FLOAT } attr { name: "T" type: "type" } -} -- | Outputs a `Summary` protocol buffer with a tensor. tensorSummary :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __tensor__: A tensor to serialize. -> Tensor Build Data.ByteString.ByteString -- ^ __summary__ tensorSummary = tensorSummary' id tensorSummary' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __tensor__: A tensor to serialize. -> Tensor Build Data.ByteString.ByteString -- ^ __summary__ tensorSummary' op'options tensor | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor] return (opDef "TensorSummary" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" description: "A tensor to serialize." type_attr: "T" } output_arg { name: "summary" type: DT_STRING } attr { name: "T" type: "type" } attr { name: "description" type: "string" default_value { s: "" } description: "A json-encoded SummaryDescription proto." } attr { name: "labels" type: "list(string)" default_value { list { } } description: "An unused list of strings." } attr { name: "display_name" type: "string" default_value { s: "" } description: "An unused string." } -} -- | A Reader that outputs the lines of a file delimited by '\n'. textLineReader :: forall m' . (MonadBuild m') => m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader. textLineReader = textLineReader' id textLineReader' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader. textLineReader' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "TextLineReader" & op'options & opInputs .~ op'inputs) {- output_arg { name: "reader_handle" description: "The handle to reference the Reader." type: DT_STRING is_ref: true } attr { name: "skip_header_lines" type: "int" default_value { i: 0 } description: "Number of lines to skip from the beginning of every file." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead." } -} -- | A Reader that outputs the lines of a file delimited by '\n'. 
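{- Example (hand-written sketch, not part of the generated bindings): a line
   reader that skips a one-line header, setting the `skip_header_lines`
   attribute listed above through OpParams (int attributes are Int64, as in
   the generated code).

     headerlessReader :: MonadBuild m' => m' (Tensor Ref ByteString)
     headerlessReader =
       textLineReader' (opAttr "skip_header_lines" .~ (1 :: Data.Int.Int64))
-}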
textLineReaderV2 :: forall m' . (MonadBuild m') => m' (Tensor Value ResourceHandle) -- ^ __reader_handle__: The handle to reference the Reader. textLineReaderV2 = textLineReaderV2' id textLineReaderV2' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Value ResourceHandle) -- ^ __reader_handle__: The handle to reference the Reader. textLineReaderV2' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "TextLineReaderV2" & op'options & opInputs .~ op'inputs) {- output_arg { name: "reader_handle" description: "The handle to reference the Reader." type: DT_RESOURCE } attr { name: "skip_header_lines" type: "int" default_value { i: 0 } description: "Number of lines to skip from the beginning of every file." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead." } -} -- | Generates labels for candidate sampling with a learned unigram distribution. -- -- See explanations of candidate sampling and the data formats at -- go/candidate-sampling. -- -- For each batch, this op picks a single set of sampled candidate labels. -- -- The advantages of sampling candidates per-batch are simplicity and the -- possibility of efficient dense matrix multiplication. The disadvantage is that -- the sampled candidates must be chosen independently of the context and of the -- true labels. threadUnsafeUnigramCandidateSampler :: Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample per batch. -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context. -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max). -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled -- candidates in a batch are unique. This requires some approximation to -- estimate the post-rejection sampling probabilities. -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the -- IDs of the num_true target_classes in the corresponding original label. -> (Tensor Build Data.Int.Int64, Tensor Build Float, Tensor Build Float) -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__) -- -- * __sampled_candidates__: A vector of length num_sampled, in which each element is -- the ID of a sampled candidate. -- -- * __true_expected_count__: A batch_size * num_true matrix, representing -- the number of times each candidate is expected to occur in a batch -- of sampled candidates. If unique=true, then this is a probability. -- -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled -- candidate representing the number of times the candidate is expected -- to occur in a batch of sampled candidates. If unique=true, then this is a -- probability. threadUnsafeUnigramCandidateSampler = threadUnsafeUnigramCandidateSampler' id threadUnsafeUnigramCandidateSampler' :: OpParams -> Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample per batch. -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context. -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max). 
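{- Example (hand-written sketch, not part of the generated bindings):
   sampling 10 candidates from [0, 1000) with one true label per row,
   rejecting duplicates within the batch. Assumes `TF.constant` from
   TensorFlow.Ops.

     (sampled, trueExpected, sampledExpected) =
       threadUnsafeUnigramCandidateSampler 10 1 1000 True
         (TF.constant (Shape [2, 1]) [3, 7 :: Data.Int.Int64])
-}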
-> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled -- candidates in a batch are unique. This requires some approximation to -- estimate the post-rejection sampling probabilities. -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the -- IDs of the num_true target_classes in the corresponding original label. -> (Tensor Build Data.Int.Int64, Tensor Build Float, Tensor Build Float) -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__) -- -- * __sampled_candidates__: A vector of length num_sampled, in which each element is -- the ID of a sampled candidate. -- -- * __true_expected_count__: A batch_size * num_true matrix, representing -- the number of times each candidate is expected to occur in a batch -- of sampled candidates. If unique=true, then this is a probability. -- -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled -- candidate representing the number of times the candidate is expected -- to occur in a batch of sampled candidates. If unique=true, then this is a -- probability. threadUnsafeUnigramCandidateSampler' op'options num_sampled num_true range_max unique true_classes | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs true_classes] return (opDef "ThreadUnsafeUnigramCandidateSampler" & opAttr "num_sampled" .~ num_sampled & opAttr "num_true" .~ num_true & opAttr "range_max" .~ range_max & opAttr "unique" .~ unique & op'options & opInputs .~ op'inputs) {- input_arg { name: "true_classes" description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label." type: DT_INT64 } output_arg { name: "sampled_candidates" description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate." type: DT_INT64 } output_arg { name: "true_expected_count" description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability." type: DT_FLOAT } output_arg { name: "sampled_expected_count" description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability." type: DT_FLOAT } attr { name: "num_true" type: "int" description: "Number of true labels per context." has_minimum: true minimum: 1 } attr { name: "num_sampled" type: "int" description: "Number of candidates to randomly sample per batch." has_minimum: true minimum: 1 } attr { name: "unique" type: "bool" description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities." } attr { name: "range_max" type: "int" description: "The sampler will sample integers from the interval [0, range_max)." has_minimum: true minimum: 1 } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "An second seed to avoid seed collision." } -} -- | Constructs a tensor by tiling a given tensor. 
-- -- This operation creates a new tensor by replicating `input` `multiples` times. -- The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements, -- and the values of `input` are replicated `multiples[i]` times along the 'i'th -- dimension. For example, tiling `[a b c d]` by `[2]` produces -- `[a b c d a b c d]`. tile :: forall v'1 v'2 t tmultiples . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tmultiples) => Tensor v'1 t -- ^ __input__: 1-D or higher. -> Tensor v'2 tmultiples -- ^ __multiples__: 1-D. Length must be the same as the number of dimensions in `input` -> Tensor Build t -- ^ __output__ tile = tile' id tile' :: forall v'1 v'2 t tmultiples . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tmultiples) => OpParams -> Tensor v'1 t -- ^ __input__: 1-D or higher. -> Tensor v'2 tmultiples -- ^ __multiples__: 1-D. Length must be the same as the number of dimensions in `input` -> Tensor Build t -- ^ __output__ tile' op'options input multiples | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs multiples] return (opDef "Tile" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tmultiples" .~ tensorType (undefined :: tmultiples) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "1-D or higher." type_attr: "T" } input_arg { name: "multiples" description: "1-D. Length must be the same as the number of dimensions in `input`" type_attr: "Tmultiples" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tmultiples" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Returns the gradient of `Tile`. -- -- Since `Tile` takes an input and repeats the input `multiples` times -- along each dimension, `TileGrad` takes in `multiples` and aggregates -- each repeated tile of `input` into `output`. tileGrad :: forall v'1 v'2 t . (TensorType t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __multiples__ -> Tensor Build t -- ^ __output__ tileGrad = tileGrad' id tileGrad' :: forall v'1 v'2 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __multiples__ -> Tensor Build t -- ^ __output__ tileGrad' op'options input multiples | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs multiples] return (opDef "TileGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "multiples" type: DT_INT32 } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } -} -- | Finds values and indices of the `k` largest elements for the last dimension. -- -- If the input is a vector (rank-1), finds the `k` largest entries in the vector -- and outputs their values and indices as vectors. Thus `values[j]` is the -- `j`-th largest entry in `input`, and its index is `indices[j]`. -- -- For matrices (resp. higher rank input), computes the top `k` entries in each -- row (resp. vector along the last dimension). Thus, -- -- values.shape = indices.shape = input.shape[:-1] + [k] -- -- If two elements are equal, the lower-index element appears first. -- -- If `k` varies dynamically, use `TopKV2` below. topK :: forall v'1 t . 
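{- Example (hand-written sketch, not part of the generated bindings): tiling
   `[1, 2, 3, 4]` by `[2]` yields the 8-element vector
   `[1, 2, 3, 4, 1, 2, 3, 4]`. Assumes `TF.constant` from TensorFlow.Ops.

     tiled :: Tensor Build Float
     tiled = tile (TF.constant (Shape [4]) [1, 2, 3, 4 :: Float])
                  (TF.constant (Shape [1]) [2 :: Data.Int.Int32])
-}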
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Data.Int.Int64 -- ^ __k__: Number of top elements to look for along the last dimension (along each -- row for matrices). -> Tensor v'1 t -- ^ __input__: 1-D or higher with last dimension at least `k`. -> (Tensor Build t, Tensor Build Data.Int.Int32) -- ^ (__values__, __indices__) -- -- * __values__: The `k` largest elements along each last dimensional slice. -- -- * __indices__: The indices of `values` within the last dimension of `input`. topK = topK' id topK' :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Data.Int.Int64 -- ^ __k__: Number of top elements to look for along the last dimension (along each -- row for matrices). -> Tensor v'1 t -- ^ __input__: 1-D or higher with last dimension at least `k`. -> (Tensor Build t, Tensor Build Data.Int.Int32) -- ^ (__values__, __indices__) -- -- * __values__: The `k` largest elements along each last dimensional slice. -- -- * __indices__: The indices of `values` within the last dimension of `input`. topK' op'options k input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "TopK" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "k" .~ k & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "1-D or higher with last dimension at least `k`." type_attr: "T" } output_arg { name: "values" description: "The `k` largest elements along each last dimensional slice." type_attr: "T" } output_arg { name: "indices" description: "The indices of `values` within the last dimension of `input`." type: DT_INT32 } attr { name: "k" type: "int" description: "Number of top elements to look for along the last dimension (along each\nrow for matrices)." has_minimum: true } attr { name: "sorted" type: "bool" default_value { b: true } description: "If true the resulting `k` elements will be sorted by the values in\ndescending order." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } -} -- | Finds values and indices of the `k` largest elements for the last dimension. -- -- If the input is a vector (rank-1), finds the `k` largest entries in the vector -- and outputs their values and indices as vectors. Thus `values[j]` is the -- `j`-th largest entry in `input`, and its index is `indices[j]`. -- -- For matrices (resp. higher rank input), computes the top `k` entries in each -- row (resp. vector along the last dimension). Thus, -- -- values.shape = indices.shape = input.shape[:-1] + [k] -- -- If two elements are equal, the lower-index element appears first. topKV2 :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __input__: 1-D or higher with last dimension at least `k`. -> Tensor v'2 Data.Int.Int32 -- ^ __k__: 0-D. Number of top elements to look for along the last dimension (along each -- row for matrices). -> (Tensor Build t, Tensor Build Data.Int.Int32) -- ^ (__values__, __indices__) -- -- * __values__: The `k` largest elements along each last dimensional slice. -- -- * __indices__: The indices of `values` within the last dimension of `input`. 
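{- Example (hand-written sketch, not part of the generated bindings): the
   top 2 entries of a vector; `topK` takes `k` as an attribute, while
   `TopKV2` below takes it as a scalar tensor. Assumes `TF.constant` from
   TensorFlow.Ops.

     (vals, idxs) = topK 2 (TF.constant (Shape [4]) [1, 4, 2, 3 :: Float])
     -- vals ==> [4, 3], idxs ==> [1, 3]
-}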
topKV2 = topKV2' id topKV2' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__: 1-D or higher with last dimension at least `k`. -> Tensor v'2 Data.Int.Int32 -- ^ __k__: 0-D. Number of top elements to look for along the last dimension (along each -- row for matrices). -> (Tensor Build t, Tensor Build Data.Int.Int32) -- ^ (__values__, __indices__) -- -- * __values__: The `k` largest elements along each last dimensional slice. -- -- * __indices__: The indices of `values` within the last dimension of `input`. topKV2' op'options input k | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs k] return (opDef "TopKV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "1-D or higher with last dimension at least `k`." type_attr: "T" } input_arg { name: "k" description: "0-D. Number of top elements to look for along the last dimension (along each\nrow for matrices)." type: DT_INT32 } output_arg { name: "values" description: "The `k` largest elements along each last dimensional slice." type_attr: "T" } output_arg { name: "indices" description: "The indices of `values` within the last dimension of `input`." type: DT_INT32 } attr { name: "sorted" type: "bool" default_value { b: true } description: "If true the resulting `k` elements will be sorted by the values in\ndescending order." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } -} -- | Shuffle dimensions of x according to a permutation. -- -- The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy: -- `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]` transpose :: forall v'1 v'2 t tperm . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tperm) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 tperm -- ^ __perm__ -> Tensor Build t -- ^ __y__ transpose = transpose' id transpose' :: forall v'1 v'2 t tperm . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tperm) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 tperm -- ^ __perm__ -> Tensor Build t -- ^ __y__ transpose' op'options x perm | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs perm] return (opDef "Transpose" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tperm" .~ tensorType (undefined :: tperm) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "perm" type_attr: "Tperm" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tperm" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Returns x / y element-wise for integer types. -- -- Truncation designates that negative numbers will round fractional quantities -- toward zero. I.e. -7 / 5 = 1. This matches C semantics but it is different -- than Python semantics. See `FloorDiv` for a division function that matches -- Python Semantics. -- -- *NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) truncateDiv :: forall v'1 v'2 t . 
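{- Example (hand-written sketch, not part of the generated bindings): the
   rank-2 case of the permutation rule above, i.e. a matrix transpose from
   shape [2, 3] to [3, 2]. Assumes `TF.constant` from TensorFlow.Ops.

     transposed :: Tensor Build Float
     transposed = transpose (TF.constant (Shape [2, 3]) [1, 2, 3, 4, 5, 6 :: Float])
                            (TF.constant (Shape [2]) [1, 0 :: Data.Int.Int32])
-}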
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ truncateDiv = truncateDiv' id truncateDiv' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ truncateDiv' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "TruncateDiv" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_UINT16 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Returns element-wise remainder of division. This emulates C semantics in that -- -- the result here is consistent with a truncating divide. -- E.g. `truncate(x / y) * y + truncate_mod(x, y) = x`. -- -- *NOTE*: `TruncateMod` supports broadcasting. More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) truncateMod :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ truncateMod = truncateMod' id truncateMod' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ truncateMod' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "TruncateMod" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | Outputs random values from a truncated normal distribution. -- -- The generated values follow a normal distribution with mean 0 and standard -- deviation 1, except that values whose magnitude is more than 2 standard -- deviations from the mean are dropped and re-picked. truncatedNormal :: forall v'1 dtype t m' . (MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Tensor v'1 t -- ^ __shape__: The shape of the output tensor. -> m' (Tensor Value dtype) -- ^ __output__: A tensor of the specified shape filled with random truncated normal -- values. truncatedNormal = truncatedNormal' id truncatedNormal' :: forall v'1 dtype t m' . (MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Tensor v'1 t -- ^ __shape__: The shape of the output tensor. -> m' (Tensor Value dtype) -- ^ __output__: A tensor of the specified shape filled with random truncated normal -- values.
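{- Example (hand-written sketch, not part of the generated bindings):
   truncation versus flooring. With x = -7 and y = 5, `truncateMod` yields
   -2 (the remainder takes the sign of x, matching C), whereas a flooring
   `mod` would yield 3. Assumes `TF.scalar` from TensorFlow.Ops.

     r :: Tensor Build Data.Int.Int32
     r = truncateMod (TF.scalar (-7)) (TF.scalar 5)
-}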
truncatedNormal' op'options shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape] buildOp [] (opDef "TruncatedNormal" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "shape" description: "The shape of the output tensor." type_attr: "T" } output_arg { name: "output" description: "A tensor of the specified shape filled with random truncated normal\nvalues." type_attr: "dtype" } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "A second seed to avoid seed collision." } attr { name: "dtype" type: "type" description: "The type of the output." allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Generates labels for candidate sampling with a uniform distribution. -- -- See explanations of candidate sampling and the data formats at -- go/candidate-sampling. -- -- For each batch, this op picks a single set of sampled candidate labels. -- -- The advantages of sampling candidates per-batch are simplicity and the -- possibility of efficient dense matrix multiplication. The disadvantage is that -- the sampled candidates must be chosen independently of the context and of the -- true labels. uniformCandidateSampler :: Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample per batch. -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context. -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max). -> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled -- candidates in a batch are unique. This requires some approximation to -- estimate the post-rejection sampling probabilities. -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the -- IDs of the num_true target_classes in the corresponding original label. -> (Tensor Build Data.Int.Int64, Tensor Build Float, Tensor Build Float) -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__) -- -- * __sampled_candidates__: A vector of length num_sampled, in which each element is -- the ID of a sampled candidate. -- -- * __true_expected_count__: A batch_size * num_true matrix, representing -- the number of times each candidate is expected to occur in a batch -- of sampled candidates. If unique=true, then this is a probability. -- -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled -- candidate representing the number of times the candidate is expected -- to occur in a batch of sampled candidates. If unique=true, then this is a -- probability. uniformCandidateSampler = uniformCandidateSampler' id uniformCandidateSampler' :: OpParams -> Data.Int.Int64 -- ^ __num_sampled__: Number of candidates to randomly sample per batch. -> Data.Int.Int64 -- ^ __num_true__: Number of true labels per context. -> Data.Int.Int64 -- ^ __range_max__: The sampler will sample integers from the interval [0, range_max). 
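{- Example (hand-written sketch, not part of the generated bindings): a
   [2, 3] tensor of truncated normals with a fixed seed, set via the `seed`
   attribute listed above; the dtype is chosen by the result annotation.
   Assumes `TF.constant` from TensorFlow.Ops.

     noise :: MonadBuild m' => m' (Tensor Value Float)
     noise = truncatedNormal' (opAttr "seed" .~ (42 :: Data.Int.Int64))
                              (TF.constant (Shape [2]) [2, 3 :: Data.Int.Int32])
-}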
-> Bool -- ^ __unique__: If unique is true, we sample with rejection, so that all sampled -- candidates in a batch are unique. This requires some approximation to -- estimate the post-rejection sampling probabilities. -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__: A batch_size * num_true matrix, in which each row contains the -- IDs of the num_true target_classes in the corresponding original label. -> (Tensor Build Data.Int.Int64, Tensor Build Float, Tensor Build Float) -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__) -- -- * __sampled_candidates__: A vector of length num_sampled, in which each element is -- the ID of a sampled candidate. -- -- * __true_expected_count__: A batch_size * num_true matrix, representing -- the number of times each candidate is expected to occur in a batch -- of sampled candidates. If unique=true, then this is a probability. -- -- * __sampled_expected_count__: A vector of length num_sampled, for each sampled -- candidate representing the number of times the candidate is expected -- to occur in a batch of sampled candidates. If unique=true, then this is a -- probability. uniformCandidateSampler' op'options num_sampled num_true range_max unique true_classes | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs true_classes] return (opDef "UniformCandidateSampler" & opAttr "num_sampled" .~ num_sampled & opAttr "num_true" .~ num_true & opAttr "range_max" .~ range_max & opAttr "unique" .~ unique & op'options & opInputs .~ op'inputs) {- input_arg { name: "true_classes" description: "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label." type: DT_INT64 } output_arg { name: "sampled_candidates" description: "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate." type: DT_INT64 } output_arg { name: "true_expected_count" description: "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability." type: DT_FLOAT } output_arg { name: "sampled_expected_count" description: "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability." type: DT_FLOAT } attr { name: "num_true" type: "int" description: "Number of true labels per context." has_minimum: true minimum: 1 } attr { name: "num_sampled" type: "int" description: "Number of candidates to randomly sample per batch." has_minimum: true minimum: 1 } attr { name: "unique" type: "bool" description: "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities." } attr { name: "range_max" type: "int" description: "The sampler will sample integers from the interval [0, range_max)." has_minimum: true minimum: 1 } attr { name: "seed" type: "int" default_value { i: 0 } description: "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed." } attr { name: "seed2" type: "int" default_value { i: 0 } description: "An second seed to avoid seed collision." } -} -- | Finds unique elements in a 1-D tensor. 
-- -- This operation returns a tensor `y` containing all of the unique elements of `x` -- sorted in the same order that they occur in `x`. This operation also returns a -- tensor `idx` the same size as `x` that contains the index of each value of `x` -- in the unique output `y`. In other words: -- -- `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` -- -- For example: -- -- ```prettyprint -- # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] -- y, idx = unique(x) -- y ==> [1, 2, 4, 7, 8] -- idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] -- ``` unique :: forall v'1 t out_idx . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_idx) => Tensor v'1 t -- ^ __x__: 1-D. -> (Tensor Build t, Tensor Build out_idx) -- ^ (__y__, __idx__) -- -- * __y__: 1-D. -- -- * __idx__: 1-D. unique = unique' id unique' :: forall v'1 t out_idx . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_idx) => OpParams -> Tensor v'1 t -- ^ __x__: 1-D. -> (Tensor Build t, Tensor Build out_idx) -- ^ (__y__, __idx__) -- -- * __y__: 1-D. -- -- * __idx__: 1-D. unique' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Unique" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "out_idx" .~ tensorType (undefined :: out_idx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" description: "1-D." type_attr: "T" } output_arg { name: "y" description: "1-D." type_attr: "T" } output_arg { name: "idx" description: "1-D." type_attr: "out_idx" } attr { name: "T" type: "type" } attr { name: "out_idx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Finds unique elements in a 1-D tensor. -- -- This operation returns a tensor `y` containing all of the unique elements of `x` -- sorted in the same order that they occur in `x`. This operation also returns a -- tensor `idx` the same size as `x` that contains the index of each value of `x` -- in the unique output `y`. Finally, it returns a third tensor `count` that -- contains the count of each element of `y` in `x`. In other words: -- -- `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` -- -- For example: -- -- ```prettyprint -- # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] -- y, idx, count = unique_with_counts(x) -- y ==> [1, 2, 4, 7, 8] -- idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] -- count ==> [2, 1, 3, 1, 2] -- ``` uniqueWithCounts :: forall v'1 t out_idx . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_idx) => Tensor v'1 t -- ^ __x__: 1-D. -> (Tensor Build t, Tensor Build out_idx, Tensor Build out_idx) -- ^ (__y__, __idx__, __count__) -- -- * __y__: 1-D. -- -- * __idx__: 1-D. -- -- * __count__: 1-D. uniqueWithCounts = uniqueWithCounts' id uniqueWithCounts' :: forall v'1 t out_idx . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_idx) => OpParams -> Tensor v'1 t -- ^ __x__: 1-D. -> (Tensor Build t, Tensor Build out_idx, Tensor Build out_idx) -- ^ (__y__, __idx__, __count__) -- -- * __y__: 1-D. -- -- * __idx__: 1-D. -- -- * __count__: 1-D. uniqueWithCounts' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "UniqueWithCounts" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "out_idx" .~ tensorType (undefined :: out_idx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" description: "1-D." type_attr: "T" } output_arg { name: "y" description: "1-D." type_attr: "T" } output_arg { name: "idx" description: "1-D." 
type_attr: "out_idx" } output_arg { name: "count" description: "1-D." type_attr: "out_idx" } attr { name: "T" type: "type" } attr { name: "out_idx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors. -- -- Unpacks `num` tensors from `value` by chipping it along the `axis` dimension. -- For example, given a tensor of shape `(A, B, C, D)`; -- -- If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]` -- and each tensor in `output` will have shape `(B, C, D)`. (Note that the -- dimension unpacked along is gone, unlike `split`). -- -- If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]` -- and each tensor in `output` will have shape `(A, C, D)`. -- Etc. -- -- This is the opposite of `pack`. unpack :: forall v'1 t . (TensorType t) => Data.Int.Int64 -- ^ __num__ -> Tensor v'1 t -- ^ __value__: 1-D or higher, with `axis` dimension size equal to `num`. -> [Tensor Build t] -- ^ __output__: The list of tensors unpacked from `value`. unpack = unpack' id unpack' :: forall v'1 t . (TensorType t) => OpParams -> Data.Int.Int64 -- ^ __num__ -> Tensor v'1 t -- ^ __value__: 1-D or higher, with `axis` dimension size equal to `num`. -> [Tensor Build t] -- ^ __output__: The list of tensors unpacked from `value`. unpack' op'options num value | eqLengthGuard [] = pureOp [num] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value] return (opDef "Unpack" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "num" .~ num & op'options & opInputs .~ op'inputs) {- input_arg { name: "value" description: "1-D or higher, with `axis` dimension size equal to `num`." type_attr: "T" } output_arg { name: "output" description: "The list of tensors unpacked from `value`." type_attr: "T" number_attr: "num" } attr { name: "num" type: "int" has_minimum: true } attr { name: "T" type: "type" } attr { name: "axis" type: "int" default_value { i: 0 } description: "Dimension along which to unpack. Negative values wrap around, so the\nvalid range is `[-R, R)`." } -} -- | Computes the Max along segments of a tensor. -- -- Read @{$math_ops#segmentation$the section on segmentation} for an explanation of -- segments. -- -- This operator is similar to the [unsorted segment sum operator](../../../api_docs/python/math_ops.md#UnsortedSegmentSum). -- Instead of computing the sum over segments, it computes the maximum -- such that: -- -- \\(output_i = \max_j data_j\\) where max is over `j` such -- that `segment_ids[j] == i`. -- -- If the maximum is empty for a given segment ID `i`, it outputs the smallest possible value for specific numeric type, -- `output[i] = numeric_limits::min()`. -- --
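-- For example, a usage sketch (an editorial illustration, not generated
-- documentation; `constant` and `scalar` are assumed to come from the
-- companion TensorFlow.Ops module, which this module does not import):
--
-- > let xs  = constant (Shape [4]) [1, 3, 2, 5 :: Float]
-- >     ids = constant (Shape [4]) [0, 0, 1, 2 :: Int32]
-- > in unsortedSegmentMax xs ids (scalar 3)
-- > -- segment 0: max of [1, 3] = 3; segment 1: max of [2] = 2;
-- > -- segment 2: max of [5] = 5, so the result is [3, 2, 5].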
unsortedSegmentMax :: forall v'1 v'2 v'3 t tindices . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s -- first dimension. -> Tensor v'3 Data.Int.Int32 -- ^ __num_segments__ -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which -- has size `num_segments`. unsortedSegmentMax = unsortedSegmentMax' id unsortedSegmentMax' :: forall v'1 v'2 v'3 t tindices . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__: A 1-D tensor whose rank is equal to the rank of `data`'s -- first dimension. -> Tensor v'3 Data.Int.Int32 -- ^ __num_segments__ -> Tensor Build t -- ^ __output__: Has same shape as data, except for dimension 0 which -- has size `num_segments`. unsortedSegmentMax' op'options data' segment_ids num_segments | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs segment_ids, buildInputs num_segments] return (opDef "UnsortedSegmentMax" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "segment_ids" description: "A 1-D tensor whose rank is equal to the rank of `data`\'s\nfirst dimension." type_attr: "Tindices" } input_arg { name: "num_segments" type: DT_INT32 } output_arg { name: "output" description: "Has same shape as data, except for dimension 0 which\nhas size `num_segments`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Computes the sum along segments of a tensor. -- -- Read @{$math_ops#segmentation$the section on segmentation} for an explanation of -- segments. -- -- Computes a tensor such that -- `(output[i] = sum_{j...} data[j...]` where the sum is over tuples `j...` such -- that `segment_ids[j...] == i`. Unlike `SegmentSum`, `segment_ids` -- need not be sorted and need not cover all values in the full -- range of valid values. -- -- If the sum is empty for a given segment ID `i`, `output[i] = 0`. -- -- `num_segments` should equal the number of distinct segment IDs. -- --
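-- For example, under the same editorial conventions as the sketch for
-- 'unsortedSegmentMax' above:
--
-- > let xs  = constant (Shape [4]) [1, 2, 3, 4 :: Float]
-- >     ids = constant (Shape [4]) [0, 1, 0, 1 :: Int32]
-- > in unsortedSegmentSum xs ids (scalar 2)
-- > -- segment 0: 1 + 3 = 4; segment 1: 2 + 4 = 6, so the result is [4, 6].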
unsortedSegmentSum :: forall v'1 v'2 v'3 t tindices . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__: A tensor whose shape is a prefix of `data.shape`. -> Tensor v'3 Data.Int.Int32 -- ^ __num_segments__ -> Tensor Build t -- ^ __output__: Has same shape as data, except for the first `segment_ids.rank` -- dimensions, which are replaced with a single dimension which has size -- `num_segments`. unsortedSegmentSum = unsortedSegmentSum' id unsortedSegmentSum' :: forall v'1 v'2 v'3 t tindices . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__: A tensor whose shape is a prefix of `data.shape`. -> Tensor v'3 Data.Int.Int32 -- ^ __num_segments__ -> Tensor Build t -- ^ __output__: Has same shape as data, except for the first `segment_ids.rank` -- dimensions, which are replaced with a single dimension which has size -- `num_segments`. unsortedSegmentSum' op'options data' segment_ids num_segments | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs segment_ids, buildInputs num_segments] return (opDef "UnsortedSegmentSum" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "segment_ids" description: "A tensor whose shape is a prefix of `data.shape`." type_attr: "Tindices" } input_arg { name: "num_segments" type: DT_INT32 } output_arg { name: "output" description: "Has same shape as data, except for the first `segment_ids.rank`\ndimensions, which are replaced with a single dimension which has size\n`num_segments`." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_HALF } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Op is similar to a lightweight Dequeue. The basic functionality is similar to -- -- dequeue with many fewer capabilities and options. This Op is optimized for -- performance. unstage :: forall dtypes m' . (MonadBuild m', TensorTypes dtypes) => m' (TensorList (Value) dtypes) -- ^ __values__ unstage = unstage' id unstage' :: forall dtypes m' .
(MonadBuild m', TensorTypes dtypes) => OpParams -> m' (TensorList (Value) dtypes) -- ^ __values__ unstage' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "Unstage" & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes) & op'options & opInputs .~ op'inputs) {- output_arg { name: "values" type_list_attr: "dtypes" } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | Creates a handle to a Variable resource. varHandleOp :: forall m' . (MonadBuild m') => DataType -- ^ __dtype__: the type of this variable. Must agree with the dtypes -- of all ops using this variable. -> Shape -- ^ __shape__: The (possibly partially specified) shape of this variable. -> m' (Tensor Value ResourceHandle) -- ^ __resource__ varHandleOp = varHandleOp' id varHandleOp' :: forall m' . (MonadBuild m') => OpParams -> DataType -- ^ __dtype__: the type of this variable. Must agree with the dtypes -- of all ops using this variable. -> Shape -- ^ __shape__: The (possibly partially specified) shape of this variable. -> m' (Tensor Value ResourceHandle) -- ^ __resource__ varHandleOp' op'options dtype shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "VarHandleOp" & opAttr "dtype" .~ dtype & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- output_arg { name: "resource" type: DT_RESOURCE } attr { name: "container" type: "string" default_value { s: "" } description: "the container this variable is placed in." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "the name by which this variable is referred to." } attr { name: "dtype" type: "type" description: "the type of this variable. Must agree with the dtypes\nof all ops using this variable." } attr { name: "shape" type: "shape" description: "The (possibly partially specified) shape of this variable." } -} -- | Checks whether a resource handle-based variable has been initialized. varIsInitializedOp :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __resource__: the input resource handle. -> m' (Tensor Value Bool) -- ^ __is_initialized__: a scalar boolean which is true if the variable has been -- initialized. varIsInitializedOp = varIsInitializedOp' id varIsInitializedOp' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__: the input resource handle. -> m' (Tensor Value Bool) -- ^ __is_initialized__: a scalar boolean which is true if the variable has been -- initialized. varIsInitializedOp' op'options resource | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource] buildOp [] (opDef "VarIsInitializedOp" & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" description: "the input resource handle." type: DT_RESOURCE } output_arg { name: "is_initialized" description: "a scalar boolean which is true if the variable has been\ninitialized." type: DT_BOOL } -} -- | Use VariableV2 instead. variable :: forall dtype m' . (MonadBuild m', TensorType dtype) => Shape -- ^ __shape__ -> m' (Tensor Ref dtype) -- ^ __ref__ variable = variable' id variable' :: forall dtype m' . 
(MonadBuild m', TensorType dtype) => OpParams -> Shape -- ^ __shape__ -> m' (Tensor Ref dtype) -- ^ __ref__ variable' op'options shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "Variable" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- output_arg { name: "ref" type_attr: "dtype" is_ref: true } attr { name: "shape" type: "shape" } attr { name: "dtype" type: "type" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | Holds state in the form of a tensor that persists across steps. -- -- Outputs a ref to the tensor state so it may be read or modified. -- TODO(zhifengc/mrry): Adds a pointer to a more detail document -- about sharing states in tensorflow. variableV2 :: forall dtype m' . (MonadBuild m', TensorType dtype) => Shape -- ^ __shape__: The shape of the variable tensor. -> m' (Tensor Ref dtype) -- ^ __ref__: A reference to the variable tensor. variableV2 = variableV2' id variableV2' :: forall dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Shape -- ^ __shape__: The shape of the variable tensor. -> m' (Tensor Ref dtype) -- ^ __ref__: A reference to the variable tensor. variableV2' op'options shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "VariableV2" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- output_arg { name: "ref" description: "A reference to the variable tensor." type_attr: "dtype" is_ref: true } attr { name: "shape" type: "shape" description: "The shape of the variable tensor." } attr { name: "dtype" type: "type" description: "The type of elements in the variable tensor." } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this variable is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this variable is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead." } -} -- | Returns locations of true values in a boolean tensor. -- -- This operation returns the coordinates of true elements in `input`. The -- coordinates are returned in a 2-D tensor where the first dimension (rows) -- represents the number of true elements, and the second dimension (columns) -- represents the coordinates of the true elements. Keep in mind, the shape of -- the output tensor can vary depending on how many true values there are in -- `input`. Indices are output in row-major order. -- -- For example: -- -- ```prettyprint -- # 'input' tensor is [[True, False] -- # [True, False]] -- # 'input' has two true values, so output has two coordinates. -- # 'input' has rank of 2, so coordinates have two indices. -- where(input) ==> [[0, 0], -- [1, 0]] -- -- # `input` tensor is [[[True, False] -- # [True, False]] -- # [[False, True] -- # [False, True]] -- # [[False, False] -- # [False, True]]] -- # 'input' has 5 true values, so output has 5 coordinates. -- # 'input' has rank of 3, so coordinates have three indices. 
-- where(input) ==> [[0, 0, 0], -- [0, 1, 0], -- [1, 0, 1], -- [1, 1, 1], -- [2, 1, 1]] -- ``` where' :: Tensor v'1 Bool -- ^ __input__ -> Tensor Build Data.Int.Int64 -- ^ __index__ where' = where'' id where'' :: OpParams -> Tensor v'1 Bool -- ^ __input__ -> Tensor Build Data.Int.Int64 -- ^ __index__ where'' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Where" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_BOOL } output_arg { name: "index" type: DT_INT64 } -} -- | A Reader that outputs the entire contents of a file as a value. -- -- To use, enqueue filenames in a Queue. The output of ReaderRead will -- be a filename (key) and the contents of that file (value). wholeFileReader :: forall m' . (MonadBuild m') => m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader. wholeFileReader = wholeFileReader' id wholeFileReader' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader. wholeFileReader' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "WholeFileReader" & op'options & opInputs .~ op'inputs) {- output_arg { name: "reader_handle" description: "The handle to reference the Reader." type: DT_STRING is_ref: true } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead." } -} -- | A Reader that outputs the entire contents of a file as a value. -- -- To use, enqueue filenames in a Queue. The output of ReaderRead will -- be a filename (key) and the contents of that file (value). wholeFileReaderV2 :: forall m' . (MonadBuild m') => m' (Tensor Value ResourceHandle) -- ^ __reader_handle__: The handle to reference the Reader. wholeFileReaderV2 = wholeFileReaderV2' id wholeFileReaderV2' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Value ResourceHandle) -- ^ __reader_handle__: The handle to reference the Reader. wholeFileReaderV2' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "WholeFileReaderV2" & op'options & opInputs .~ op'inputs) {- output_arg { name: "reader_handle" description: "The handle to reference the Reader." type: DT_RESOURCE } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead." } -} -- | Writes contents to the file at input filename. Creates file if not existing. writeFile :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __filename__: scalar. The name of the file to which we write the contents. -> Tensor v'2 Data.ByteString.ByteString -- ^ __contents__: scalar. The content to be written to the output file. -> m' (ControlNode) writeFile = writeFile' id writeFile' :: forall v'1 v'2 m' . 
(MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __filename__: scalar. The name of the file to which we write the contents. -> Tensor v'2 Data.ByteString.ByteString -- ^ __contents__: scalar. The content to be written to the output file. -> m' (ControlNode) writeFile' op'options filename contents | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs filename, buildInputs contents] buildOp [] (opDef "WriteFile" & op'options & opInputs .~ op'inputs) {- input_arg { name: "filename" description: "scalar. The name of the file to which we write the contents." type: DT_STRING } input_arg { name: "contents" description: "scalar. The content to be written to the output file." type: DT_STRING } -} -- | Returns a tensor of zeros with the same shape and type as x. zerosLike :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __x__: a tensor of type T. -> Tensor Build t -- ^ __y__: a tensor of the same shape and type as x but filled with zeros. zerosLike = zerosLike' id zerosLike' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __x__: a tensor of type T. -> Tensor Build t -- ^ __y__: a tensor of the same shape and type as x but filled with zeros. zerosLike' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "ZerosLike" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" description: "a tensor of type T." type_attr: "T" } output_arg { name: "y" description: "a tensor of the same shape and type as x but filled with zeros." type_attr: "T" } attr { name: "T" type: "type" } -} -- | Compute the Hurwitz zeta function \\(\zeta(x, q)\\). -- -- The Hurwitz zeta function is defined as: -- -- ``` -- \zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x} -- ``` zeta :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __q__ -> Tensor Build t -- ^ __z__ zeta = zeta' id zeta' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __q__ -> Tensor Build t -- ^ __z__ zeta' op'options x q | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs q] return (opDef "Zeta" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "q" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | A graph node which represents an argument to a function. _Arg :: forall t m' . (MonadBuild m', TensorType t) => Data.Int.Int64 -- ^ __index__: This argument is the index-th argument of the function. -> m' (Tensor Value t) -- ^ __output__: The argument. _Arg = _Arg' id _Arg' :: forall t m' . (MonadBuild m', TensorType t) => OpParams -> Data.Int.Int64 -- ^ __index__: This argument is the index-th argument of the function. -> m' (Tensor Value t) -- ^ __output__: The argument. _Arg' op'options index | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "_Arg" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "index" .~ index & op'options & opInputs .~ op'inputs) {- output_arg { name: "output" description: "The argument." 
type_attr: "T" } attr { name: "T" type: "type" } attr { name: "index" type: "int" description: "This argument is the index-th argument of the function." has_minimum: true } -} -- | Converts an array of tensors to a list of tensors. _ArrayToList :: forall v'1 t out_types . (TensorType t, TensorTypes out_types) => [Tensor v'1 t] -- ^ __input__ -> TensorList (Build) out_types -- ^ __output__ _ArrayToList = _ArrayToList' id _ArrayToList' :: forall v'1 t out_types . (TensorType t, TensorTypes out_types) => OpParams -> [Tensor v'1 t] -- ^ __input__ -> TensorList (Build) out_types -- ^ __output__ _ArrayToList' op'options input | eqLengthGuard [("N", [("input", length input)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "_ArrayToList" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "out_types" .~ fromTensorTypes (Proxy :: Proxy out_types) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length input) :: Int64 {- input_arg { name: "input" type_attr: "T" number_attr: "N" } output_arg { name: "output" type_list_attr: "out_types" } attr { name: "T" type: "type" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "out_types" type: "list(type)" has_minimum: true minimum: 1 } -} -- | Cast x of type SrcT to y of DstT. -- -- _HostCast requires its input and produces its output in host memory. _HostCast :: forall v'1 srcT dstT . (TensorType srcT, TensorType dstT) => Tensor v'1 srcT -- ^ __x__ -> Tensor Build dstT -- ^ __y__ _HostCast = _HostCast' id _HostCast' :: forall v'1 srcT dstT . (TensorType srcT, TensorType dstT) => OpParams -> Tensor v'1 srcT -- ^ __x__ -> Tensor Build dstT -- ^ __y__ _HostCast' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "_HostCast" & opAttr "SrcT" .~ tensorType (undefined :: srcT) & opAttr "DstT" .~ tensorType (undefined :: dstT) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "SrcT" } output_arg { name: "y" type_attr: "DstT" } attr { name: "SrcT" type: "type" } attr { name: "DstT" type: "type" } -} -- | Receives the named tensor from send_device on recv_device. -- -- _HostRecv requires its input on host memory whereas _Recv requires its -- input on device memory. _HostRecv :: forall tensor_type m' . (MonadBuild m', TensorType tensor_type) => Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device. -> m' (Tensor Value tensor_type) -- ^ __tensor__: The tensor to receive. _HostRecv = _HostRecv' id _HostRecv' :: forall tensor_type m' . (MonadBuild m', TensorType tensor_type) => OpParams -> Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device. -> m' (Tensor Value tensor_type) -- ^ __tensor__: The tensor to receive. _HostRecv' op'options send_device_incarnation | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "_HostRecv" & opAttr "tensor_type" .~ tensorType (undefined :: tensor_type) & opAttr "send_device_incarnation" .~ send_device_incarnation & op'options & opInputs .~ op'inputs) {- output_arg { name: "tensor" description: "The tensor to receive." type_attr: "tensor_type" } attr { name: "tensor_type" type: "type" } attr { name: "tensor_name" type: "string" description: "The name of the tensor to receive." } attr { name: "send_device" type: "string" description: "The name of the device sending the tensor." 
} attr { name: "send_device_incarnation" type: "int" description: "The current incarnation of send_device." } attr { name: "recv_device" type: "string" description: "The name of the device receiving the tensor." } attr { name: "client_terminated" type: "bool" default_value { b: false } description: "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller." } -} -- | Sends the named tensor from send_device to recv_device. -- -- _HostSend requires its input on host memory whereas _Send requires its -- input on device memory. _HostSend :: forall v'1 t m' . (MonadBuild m', TensorType t) => Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device. -> Tensor v'1 t -- ^ __tensor__: The tensor to send. -> m' (ControlNode) _HostSend = _HostSend' id _HostSend' :: forall v'1 t m' . (MonadBuild m', TensorType t) => OpParams -> Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device. -> Tensor v'1 t -- ^ __tensor__: The tensor to send. -> m' (ControlNode) _HostSend' op'options send_device_incarnation tensor | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor] buildOp [] (opDef "_HostSend" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "send_device_incarnation" .~ send_device_incarnation & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" description: "The tensor to send." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "tensor_name" type: "string" description: "The name of the tensor to send." } attr { name: "send_device" type: "string" description: "The name of the device sending the tensor." } attr { name: "send_device_incarnation" type: "int" description: "The current incarnation of send_device." } attr { name: "recv_device" type: "string" description: "The name of the device receiving the tensor." } attr { name: "client_terminated" type: "bool" default_value { b: false } description: "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller." } -} -- | Converts a list of tensors to an array of tensors. _ListToArray :: forall v'1 tin t . (TensorTypes tin, TensorType t) => Data.Int.Int64 -- ^ __N__ -> TensorList (v'1) tin -- ^ __input__ -> [Tensor Build t] -- ^ __output__ _ListToArray = _ListToArray' id _ListToArray' :: forall v'1 tin t . (TensorTypes tin, TensorType t) => OpParams -> Data.Int.Int64 -- ^ __N__ -> TensorList (v'1) tin -- ^ __input__ -> [Tensor Build t] -- ^ __output__ _ListToArray' op'options n input | eqLengthGuard [] = pureOp [n] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "_ListToArray" & opAttr "Tin" .~ fromTensorTypes (Proxy :: Proxy tin) & opAttr "T" .~ tensorType (undefined :: t) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_list_attr: "Tin" } output_arg { name: "output" type_attr: "T" number_attr: "N" } attr { name: "Tin" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } -} -- | Creates an empty Tensor with shape `shape` and type `dtype`. -- -- The memory can optionally be initialized. 
This is usually useful in -- conjunction with inplace operations. _ParallelConcatStart :: forall dtype m' . (MonadBuild m', TensorType dtype) => Shape -- ^ __shape__: 1-D `Tensor` indicating the shape of the output. -> m' (Tensor Value dtype) -- ^ __output__: An empty Tensor of the specified type. _ParallelConcatStart = _ParallelConcatStart' id _ParallelConcatStart' :: forall dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Shape -- ^ __shape__: 1-D `Tensor` indicating the shape of the output. -> m' (Tensor Value dtype) -- ^ __output__: An empty Tensor of the specified type. _ParallelConcatStart' op'options shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "_ParallelConcatStart" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- output_arg { name: "output" description: "An empty Tensor of the specified type." type_attr: "dtype" } attr { name: "shape" type: "shape" description: "1-D `Tensor` indicating the shape of the output." } attr { name: "dtype" type: "type" description: "The element type of the returned tensor." } -} -- | Updates input `value` at `loc` with `update`. -- -- If you use this function you will almost certainly want to add -- a control dependency as done in the implementation of parallel_stack to -- avoid race conditions. _ParallelConcatUpdate :: forall v'1 v'2 t . (TensorType t) => Data.Int.Int64 -- ^ __loc__: A scalar indicating the index of the first dimension such that -- value[loc, :] is updated. -> Tensor v'1 t -- ^ __value__: A `Tensor` object that will be updated in-place. -> Tensor v'2 t -- ^ __update__: A `Tensor` of rank one less than `value` if `loc` is a scalar, -- otherwise of rank equal to `value` that contains the new values -- for `value`. -> Tensor Build t -- ^ __output__: `value` that has been updated accordingly. _ParallelConcatUpdate = _ParallelConcatUpdate' id _ParallelConcatUpdate' :: forall v'1 v'2 t . (TensorType t) => OpParams -> Data.Int.Int64 -- ^ __loc__: A scalar indicating the index of the first dimension such that -- value[loc, :] is updated. -> Tensor v'1 t -- ^ __value__: A `Tensor` object that will be updated in-place. -> Tensor v'2 t -- ^ __update__: A `Tensor` of rank one less than `value` if `loc` is a scalar, -- otherwise of rank equal to `value` that contains the new values -- for `value`. -> Tensor Build t -- ^ __output__: `value` that has been updated accordingly. _ParallelConcatUpdate' op'options loc value update | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value, buildInputs update] return (opDef "_ParallelConcatUpdate" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "loc" .~ loc & op'options & opInputs .~ op'inputs) {- input_arg { name: "value" description: "A `Tensor` object that will be updated in-place." type_attr: "T" } input_arg { name: "update" description: "A `Tensor` of rank one less than `value` if `loc` is a scalar,\notherwise of rank equal to `value` that contains the new values\nfor `value`." type_attr: "T" } output_arg { name: "output" description: "`value` that has been updated accordingly." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "loc" type: "int" description: "A scalar indicating the index of the first dimension such that\nvalue[loc, :] is updated." } -} -- | Receives the named tensor from send_device on recv_device. _Recv :: forall tensor_type m' . 
(MonadBuild m', TensorType tensor_type) => Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device. -> m' (Tensor Value tensor_type) -- ^ __tensor__: The tensor to receive. _Recv = _Recv' id _Recv' :: forall tensor_type m' . (MonadBuild m', TensorType tensor_type) => OpParams -> Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device. -> m' (Tensor Value tensor_type) -- ^ __tensor__: The tensor to receive. _Recv' op'options send_device_incarnation | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "_Recv" & opAttr "tensor_type" .~ tensorType (undefined :: tensor_type) & opAttr "send_device_incarnation" .~ send_device_incarnation & op'options & opInputs .~ op'inputs) {- output_arg { name: "tensor" description: "The tensor to receive." type_attr: "tensor_type" } attr { name: "tensor_type" type: "type" } attr { name: "tensor_name" type: "string" description: "The name of the tensor to receive." } attr { name: "send_device" type: "string" description: "The name of the device sending the tensor." } attr { name: "send_device_incarnation" type: "int" description: "The current incarnation of send_device." } attr { name: "recv_device" type: "string" description: "The name of the device receiving the tensor." } attr { name: "client_terminated" type: "bool" default_value { b: false } description: "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller." } -} -- | A graph node which represents a return value of a function. _Retval :: forall v'1 t m' . (MonadBuild m', TensorType t) => Data.Int.Int64 -- ^ __index__: This return value is the index-th return value of the function. -> Tensor v'1 t -- ^ __input__: The return value. -> m' (ControlNode) _Retval = _Retval' id _Retval' :: forall v'1 t m' . (MonadBuild m', TensorType t) => OpParams -> Data.Int.Int64 -- ^ __index__: This return value is the index-th return value of the function. -> Tensor v'1 t -- ^ __input__: The return value. -> m' (ControlNode) _Retval' op'options index input | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "_Retval" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "index" .~ index & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "The return value." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "index" type: "int" description: "This return value is the index-th return value of the function." has_minimum: true } -} -- | Sends the named tensor from send_device to recv_device. _Send :: forall v'1 t m' . (MonadBuild m', TensorType t) => Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device. -> Tensor v'1 t -- ^ __tensor__: The tensor to send. -> m' (ControlNode) _Send = _Send' id _Send' :: forall v'1 t m' . (MonadBuild m', TensorType t) => OpParams -> Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device. -> Tensor v'1 t -- ^ __tensor__: The tensor to send. 
-> m' (ControlNode) _Send' op'options send_device_incarnation tensor | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor] buildOp [] (opDef "_Send" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "send_device_incarnation" .~ send_device_incarnation & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" description: "The tensor to send." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "tensor_name" type: "string" description: "The name of the tensor to send." } attr { name: "send_device" type: "string" description: "The name of the device sending the tensor." } attr { name: "send_device_incarnation" type: "int" description: "The current incarnation of send_device." } attr { name: "recv_device" type: "string" description: "The name of the device receiving the tensor." } attr { name: "client_terminated" type: "bool" default_value { b: false } description: "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller." } -} -- | Reads the value of a variable without any memory model. -- -- The tensor returned by this operation aliases a mutable Tensor, and its value -- can be observed to be different by different ops. -- -- Internal and private to the tensorflow implementation. _UnsafeReadVariable :: forall v'1 dtype m' . (MonadBuild m', TensorType dtype) => Tensor v'1 ResourceHandle -- ^ __resource__: handle to the resource in which to store the variable. -> m' (Tensor Value dtype) -- ^ __value__ _UnsafeReadVariable = _UnsafeReadVariable' id _UnsafeReadVariable' :: forall v'1 dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__: handle to the resource in which to store the variable. -> m' (Tensor Value dtype) -- ^ __value__ _UnsafeReadVariable' op'options resource | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource] buildOp [] (opDef "_UnsafeReadVariable" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" description: "handle to the resource in which to store the variable." type: DT_RESOURCE } output_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" description: "the dtype of the value." } -}
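-- A closing usage sketch (an editorial addition, not generated code): running
-- one of the pure ops above inside a session. Here `constant`, `run`, and
-- `runSession` are assumed to come from the companion tensorflow-ops and
-- tensorflow packages; fetching the 2-D result yields a flat, row-major
-- vector.
--
-- > import Data.Int (Int64)
-- > import qualified Data.Vector as V
-- > import TensorFlow.GenOps.Core (where')
-- > import TensorFlow.Ops (constant)
-- > import TensorFlow.Session (run, runSession)
-- > import TensorFlow.Types (Shape(..))
-- >
-- > -- 'input' is [[True, False], [True, False]]; 'where'' returns the
-- > -- coordinates of its true elements, [[0, 0], [1, 0]], which are
-- > -- fetched row-major as [0, 0, 1, 0].
-- > whereDemo :: IO (V.Vector Int64)
-- > whereDemo = runSession $
-- >     run (where' (constant (Shape [2, 2]) [True, False, True, False]))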