{-# LANGUAGE ConstraintKinds #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# OPTIONS_GHC -fno-warn-name-shadowing #-}
{-# OPTIONS_GHC -fno-warn-incomplete-patterns #-}
module TensorFlow.GenOps.Core where

import Data.ByteString (ByteString)
import Data.Complex (Complex)
import Data.Int (Int8, Int16, Int32, Int64)
import Data.Proxy (Proxy(Proxy))
import Data.Word (Word8, Word16, Word32, Word64)
import Lens.Family2 ((.~), (&))
import TensorFlow.Build
import TensorFlow.BuildOp
import TensorFlow.Tensor
import TensorFlow.Types

-- |
abort :: forall m' . (MonadBuild m') => m' (ControlNode)
abort = abort' id
abort' :: forall m' . (MonadBuild m') => OpParams -> m' (ControlNode)
abort' op'options | eqLengthGuard [] = build $ do
    op'inputs <- fmap Prelude.concat $ Prelude.sequence []
    buildOp [] (opDef "Abort"
                & op'options & opInputs .~ op'inputs)
{-
attr { name: "error_msg" type: "string" default_value { s: "" } }
attr { name: "exit_without_error" type: "bool" default_value { b: false } }
-}

-- |
abs :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64,
                               Data.Word.Word16, Double, Float] t)
    => Tensor v'1 t -- ^ __x__
    -> Tensor Build t -- ^ __y__
abs = abs' id
abs' :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64,
                                Data.Word.Word16, Double, Float] t)
     => OpParams
     -> Tensor v'1 t -- ^ __x__
     -> Tensor Build t -- ^ __y__
abs' op'options x | eqLengthGuard [] = pureOp [] $ do
    op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
    return (opDef "Abs"
            & opAttr "T" .~ tensorType (undefined :: t)
            & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "x" type_attr: "T" }
output_arg { name: "y" type_attr: "T" }
attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } }
-}
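
-- Usage sketch (hand-written note, not part of the generated bindings):
-- ops that return 'Tensor Build' are pure graph constructors.  Assuming the
-- companion @tensorflow@ package's 'TensorFlow.Ops.constant' and
-- 'TensorFlow.Session' API, 'abs' can be evaluated as:
--
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.GenOps.Core as Ops
-- > import TensorFlow.Ops (constant)
-- > import TensorFlow.Session (runSession, run)
-- > import TensorFlow.Types (Shape(..))
-- >
-- > absExample :: IO (V.Vector Float)
-- > absExample = runSession $ run (Ops.abs (constant (Shape [3]) [-1, 2, -3]))
--
-- The primed variants (e.g. 'abs'') take an extra 'OpParams' argument
-- (an @OpDef -> OpDef@ modifier) for setting optional attrs, as the
-- 'opAttr' uses throughout this module illustrate.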
-- |
accumulateNV2 :: forall v'1 t .
                 (OneOf '[(Data.Complex.Complex Double),
                          (Data.Complex.Complex Float), Data.Int.Int16,
                          Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                          Data.Word.Word16, Data.Word.Word32,
                          Data.Word.Word64, Data.Word.Word8, Double,
                          Float] t)
              => Shape -- ^ __shape__
              -> [Tensor v'1 t] -- ^ __inputs__
              -> Tensor Build t -- ^ __sum__
accumulateNV2 = accumulateNV2' id
accumulateNV2' :: forall v'1 t .
                  (OneOf '[(Data.Complex.Complex Double),
                           (Data.Complex.Complex Float), Data.Int.Int16,
                           Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                           Data.Word.Word16, Data.Word.Word32,
                           Data.Word.Word64, Data.Word.Word8, Double,
                           Float] t)
               => OpParams
               -> Shape -- ^ __shape__
               -> [Tensor v'1 t] -- ^ __inputs__
               -> Tensor Build t -- ^ __sum__
accumulateNV2' op'options shape inputs
  | eqLengthGuard [("N", [("inputs", length inputs)])] = pureOp [] $ do
      op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs]
      return (opDef "AccumulateNV2"
              & opAttr "T" .~ tensorType (undefined :: t)
              & opAttr "shape" .~ shape
              & opAttr "N" .~ n
              & op'options & opInputs .~ op'inputs)
  where
    n = fromIntegral (length inputs) :: Int64
{-
input_arg { name: "inputs" type_attr: "T" number_attr: "N" }
output_arg { name: "sum" type_attr: "T" }
attr { name: "N" type: "int" has_minimum: true minimum: 1 }
attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } }
attr { name: "shape" type: "shape" }
-}
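
-- Usage sketch (hand-written note, not part of the generated bindings):
-- for list-valued inputs the wrapper derives the op's @N@ attr from the
-- list length (see the @("N", [("inputs", length inputs)])@ guard above),
-- so summing a batch of same-shaped tensors needs no explicit count.
-- Assuming 'TensorFlow.Ops.constant' again:
--
-- > total :: Tensor Build Float
-- > total = accumulateNV2 (Shape [2]) [ constant (Shape [2]) [1, 2]
-- >                                   , constant (Shape [2]) [3, 4] ]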
-- |
accumulatorApplyGradient :: forall v'2 v'3 dtype m' .
                            (MonadBuild m',
                             OneOf '[(Data.Complex.Complex Double),
                                     (Data.Complex.Complex Float),
                                     Data.Int.Int16, Data.Int.Int32,
                                     Data.Int.Int64, Data.Int.Int8,
                                     Data.Word.Word16, Data.Word.Word32,
                                     Data.Word.Word64, Data.Word.Word8,
                                     Double, Float] dtype)
                         => Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                         -> Tensor v'2 Data.Int.Int64 -- ^ __local_step__
                         -> Tensor v'3 dtype -- ^ __gradient__
                         -> m' (ControlNode)
accumulatorApplyGradient = accumulatorApplyGradient' id
accumulatorApplyGradient' :: forall v'2 v'3 dtype m' .
                             (MonadBuild m',
                              OneOf '[(Data.Complex.Complex Double),
                                      (Data.Complex.Complex Float),
                                      Data.Int.Int16, Data.Int.Int32,
                                      Data.Int.Int64, Data.Int.Int8,
                                      Data.Word.Word16, Data.Word.Word32,
                                      Data.Word.Word64, Data.Word.Word8,
                                      Double, Float] dtype)
                          => OpParams
                          -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                          -> Tensor v'2 Data.Int.Int64 -- ^ __local_step__
                          -> Tensor v'3 dtype -- ^ __gradient__
                          -> m' (ControlNode)
accumulatorApplyGradient' op'options handle local_step gradient
  | eqLengthGuard [] = build $ do
      op'inputs <- fmap Prelude.concat $ Prelude.sequence
                       [buildInputs handle, buildInputs local_step,
                        buildInputs gradient]
      buildOp [] (opDef "AccumulatorApplyGradient"
                  & opAttr "dtype" .~ tensorType (undefined :: dtype)
                  & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "handle" type: DT_STRING is_ref: true }
input_arg { name: "local_step" type: DT_INT64 }
input_arg { name: "gradient" type_attr: "dtype" }
attr { name: "dtype" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } }
-}

-- |
accumulatorNumAccumulated :: forall m' . (MonadBuild m')
                          => Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                          -> m' (Tensor Value Data.Int.Int32) -- ^ __num_accumulated__
accumulatorNumAccumulated = accumulatorNumAccumulated' id
accumulatorNumAccumulated' :: forall m' . (MonadBuild m')
                           => OpParams
                           -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                           -> m' (Tensor Value Data.Int.Int32) -- ^ __num_accumulated__
accumulatorNumAccumulated' op'options handle | eqLengthGuard [] = build $ do
    op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle]
    buildOp [] (opDef "AccumulatorNumAccumulated"
                & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "handle" type: DT_STRING is_ref: true }
output_arg { name: "num_accumulated" type: DT_INT32 }
-}

-- |
accumulatorSetGlobalStep :: forall v'2 m' . (MonadBuild m')
                         => Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                         -> Tensor v'2 Data.Int.Int64 -- ^ __new_global_step__
                         -> m' (ControlNode)
accumulatorSetGlobalStep = accumulatorSetGlobalStep' id
accumulatorSetGlobalStep' :: forall v'2 m' . (MonadBuild m')
                          => OpParams
                          -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                          -> Tensor v'2 Data.Int.Int64 -- ^ __new_global_step__
                          -> m' (ControlNode)
accumulatorSetGlobalStep' op'options handle new_global_step
  | eqLengthGuard [] = build $ do
      op'inputs <- fmap Prelude.concat $ Prelude.sequence
                       [buildInputs handle, buildInputs new_global_step]
      buildOp [] (opDef "AccumulatorSetGlobalStep"
                  & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "handle" type: DT_STRING is_ref: true }
input_arg { name: "new_global_step" type: DT_INT64 }
-}

-- |
accumulatorTakeGradient :: forall v'2 dtype m' .
                           (MonadBuild m',
                            OneOf '[(Data.Complex.Complex Double),
                                    (Data.Complex.Complex Float),
                                    Data.Int.Int16, Data.Int.Int32,
                                    Data.Int.Int64, Data.Int.Int8,
                                    Data.Word.Word16, Data.Word.Word32,
                                    Data.Word.Word64, Data.Word.Word8,
                                    Double, Float] dtype)
                        => Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                        -> Tensor v'2 Data.Int.Int32 -- ^ __num_required__
                        -> m' (Tensor Value dtype) -- ^ __average__
accumulatorTakeGradient = accumulatorTakeGradient' id
accumulatorTakeGradient' :: forall v'2 dtype m' .
                            (MonadBuild m',
                             OneOf '[(Data.Complex.Complex Double),
                                     (Data.Complex.Complex Float),
                                     Data.Int.Int16, Data.Int.Int32,
                                     Data.Int.Int64, Data.Int.Int8,
                                     Data.Word.Word16, Data.Word.Word32,
                                     Data.Word.Word64, Data.Word.Word8,
                                     Double, Float] dtype)
                         => OpParams
                         -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__
                         -> Tensor v'2 Data.Int.Int32 -- ^ __num_required__
                         -> m' (Tensor Value dtype) -- ^ __average__
accumulatorTakeGradient' op'options handle num_required
  | eqLengthGuard [] = build $ do
      op'inputs <- fmap Prelude.concat $ Prelude.sequence
                       [buildInputs handle, buildInputs num_required]
      buildOp [] (opDef "AccumulatorTakeGradient"
                  & opAttr "dtype" .~ tensorType (undefined :: dtype)
                  & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "handle" type: DT_STRING is_ref: true }
input_arg { name: "num_required" type: DT_INT32 }
output_arg { name: "average" type_attr: "dtype" }
attr { name: "dtype" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } }
-}
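
-- Usage sketch (hand-written note, not part of the generated bindings):
-- the four accumulator ops above form one protocol.  Given a @handle@ from
-- an accumulator-creating op (e.g. @conditionalAccumulator@, defined later
-- in this module), and assuming the 'Num' instance for 'Tensor Build'
-- provided by 'TensorFlow.Ops' so that literals denote scalar tensors:
--
-- > accumulatorSetGlobalStep handle 0         -- reject stale gradients
-- > accumulatorApplyGradient handle 0 grad    -- local_step = 0
-- > n   <- accumulatorNumAccumulated handle   -- gradients received so far
-- > avg <- accumulatorTakeGradient handle 1   -- block until >= 1, then average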
-- |
acos :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                (Data.Complex.Complex Float), Data.Int.Int32,
                                Data.Int.Int64, Data.Word.Word16, Double,
                                Float] t)
     => Tensor v'1 t -- ^ __x__
     -> Tensor Build t -- ^ __y__
acos = acos' id
acos' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                 (Data.Complex.Complex Float), Data.Int.Int32,
                                 Data.Int.Int64, Data.Word.Word16, Double,
                                 Float] t)
      => OpParams
      -> Tensor v'1 t -- ^ __x__
      -> Tensor Build t -- ^ __y__
acos' op'options x | eqLengthGuard [] = pureOp [] $ do
    op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
    return (opDef "Acos"
            & opAttr "T" .~ tensorType (undefined :: t)
            & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "x" type_attr: "T" }
output_arg { name: "y" type_attr: "T" }
attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } }
-}

-- |
acosh :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                 (Data.Complex.Complex Float),
                                 Data.Word.Word16, Double, Float] t)
      => Tensor v'1 t -- ^ __x__
      -> Tensor Build t -- ^ __y__
acosh = acosh' id
acosh' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                  (Data.Complex.Complex Float),
                                  Data.Word.Word16, Double, Float] t)
       => OpParams
       -> Tensor v'1 t -- ^ __x__
       -> Tensor Build t -- ^ __y__
acosh' op'options x | eqLengthGuard [] = pureOp [] $ do
    op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x]
    return (opDef "Acosh"
            & opAttr "T" .~ tensorType (undefined :: t)
            & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "x" type_attr: "T" }
output_arg { name: "y" type_attr: "T" }
attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } }
-}

-- |
add :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                   (Data.Complex.Complex Float),
                                   Data.ByteString.ByteString, Data.Int.Int16,
                                   Data.Int.Int32, Data.Int.Int64,
                                   Data.Int.Int8, Data.Word.Word16,
                                   Data.Word.Word8, Double, Float] t)
    => Tensor v'1 t -- ^ __x__
    -> Tensor v'2 t -- ^ __y__
    -> Tensor Build t -- ^ __z__
add = add' id
add' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                    (Data.Complex.Complex Float),
                                    Data.ByteString.ByteString, Data.Int.Int16,
                                    Data.Int.Int32, Data.Int.Int64,
                                    Data.Int.Int8, Data.Word.Word16,
                                    Data.Word.Word8, Double, Float] t)
     => OpParams
     -> Tensor v'1 t -- ^ __x__
     -> Tensor v'2 t -- ^ __y__
     -> Tensor Build t -- ^ __z__
add' op'options x y | eqLengthGuard [] = pureOp [] $ do
    op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                                         buildInputs y]
    return (opDef "Add"
            & opAttr "T" .~ tensorType (undefined :: t)
            & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "x" type_attr: "T" }
input_arg { name: "y" type_attr: "T" }
output_arg { name: "z" type_attr: "T" }
attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_STRING } } }
-}
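
-- Usage sketch (hand-written note, not part of the generated bindings):
-- element-wise ops compose directly.  Note from the attr list above that
-- @Add@ also accepts DT_STRING operands, unlike @AddV2@ below:
--
-- > y :: Tensor Build Float
-- > y = add (constant (Shape [2]) [1, 2]) (acos (constant (Shape [2]) [0, 1]))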
-- |
addManySparseToTensorsMap :: forall v'1 v'2 v'3 t m' . (MonadBuild m',
                                                        TensorType t)
                          => Tensor v'1 Data.Int.Int64 -- ^ __sparse_indices__
                          -> Tensor v'2 t -- ^ __sparse_values__
                          -> Tensor v'3 Data.Int.Int64 -- ^ __sparse_shape__
                          -> m' (Tensor Value Data.Int.Int64) -- ^ __sparse_handles__
addManySparseToTensorsMap = addManySparseToTensorsMap' id
addManySparseToTensorsMap' :: forall v'1 v'2 v'3 t m' . (MonadBuild m',
                                                         TensorType t)
                           => OpParams
                           -> Tensor v'1 Data.Int.Int64 -- ^ __sparse_indices__
                           -> Tensor v'2 t -- ^ __sparse_values__
                           -> Tensor v'3 Data.Int.Int64 -- ^ __sparse_shape__
                           -> m' (Tensor Value Data.Int.Int64) -- ^ __sparse_handles__
addManySparseToTensorsMap' op'options sparse_indices sparse_values sparse_shape
  | eqLengthGuard [] = build $ do
      op'inputs <- fmap Prelude.concat $ Prelude.sequence
                       [buildInputs sparse_indices, buildInputs sparse_values,
                        buildInputs sparse_shape]
      buildOp [] (opDef "AddManySparseToTensorsMap"
                  & opAttr "T" .~ tensorType (undefined :: t)
                  & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "sparse_indices" type: DT_INT64 }
input_arg { name: "sparse_values" type_attr: "T" }
input_arg { name: "sparse_shape" type: DT_INT64 }
output_arg { name: "sparse_handles" type: DT_INT64 }
attr { name: "T" type: "type" }
attr { name: "container" type: "string" default_value { s: "" } }
attr { name: "shared_name" type: "string" default_value { s: "" } }
-}

-- |
addN :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                (Data.Complex.Complex Float), Data.Int.Int16,
                                Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                                Data.Word.Word16, Data.Word.Word32,
                                Data.Word.Word64, Data.Word.Word8, Double,
                                Float, Variant] t)
     => [Tensor v'1 t] -- ^ __inputs__
     -> Tensor Build t -- ^ __sum__
addN = addN' id
addN' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double),
                                 (Data.Complex.Complex Float), Data.Int.Int16,
                                 Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                                 Data.Word.Word16, Data.Word.Word32,
                                 Data.Word.Word64, Data.Word.Word8, Double,
                                 Float, Variant] t)
      => OpParams
      -> [Tensor v'1 t] -- ^ __inputs__
      -> Tensor Build t -- ^ __sum__
addN' op'options inputs
  | eqLengthGuard [("N", [("inputs", length inputs)])] = pureOp [] $ do
      op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs]
      return (opDef "AddN"
              & opAttr "T" .~ tensorType (undefined :: t)
              & opAttr "N" .~ n
              & op'options & opInputs .~ op'inputs)
  where
    n = fromIntegral (length inputs) :: Int64
{-
input_arg { name: "inputs" type_attr: "T" number_attr: "N" }
output_arg { name: "sum" type_attr: "T" }
attr { name: "N" type: "int" has_minimum: true minimum: 1 }
attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 type: DT_VARIANT } } }
-}
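
-- Usage sketch (hand-written note, not part of the generated bindings):
-- @addN@ follows the same list-input pattern as @accumulateNV2@:
--
-- > total = addN [x1, x2, x3]
--
-- Per the upstream TensorFlow op docs, AccumulateNV2 differs from AddN in
-- not requiring all of its inputs to be live at once; AddN in turn also
-- accepts DT_VARIANT values, as its attr list above shows.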
-- |
addSparseToTensorsMap :: forall v'1 v'2 v'3 t m' . (MonadBuild m',
                                                    TensorType t)
                      => Tensor v'1 Data.Int.Int64 -- ^ __sparse_indices__
                      -> Tensor v'2 t -- ^ __sparse_values__
                      -> Tensor v'3 Data.Int.Int64 -- ^ __sparse_shape__
                      -> m' (Tensor Value Data.Int.Int64) -- ^ __sparse_handle__
addSparseToTensorsMap = addSparseToTensorsMap' id
addSparseToTensorsMap' :: forall v'1 v'2 v'3 t m' . (MonadBuild m',
                                                     TensorType t)
                       => OpParams
                       -> Tensor v'1 Data.Int.Int64 -- ^ __sparse_indices__
                       -> Tensor v'2 t -- ^ __sparse_values__
                       -> Tensor v'3 Data.Int.Int64 -- ^ __sparse_shape__
                       -> m' (Tensor Value Data.Int.Int64) -- ^ __sparse_handle__
addSparseToTensorsMap' op'options sparse_indices sparse_values sparse_shape
  | eqLengthGuard [] = build $ do
      op'inputs <- fmap Prelude.concat $ Prelude.sequence
                       [buildInputs sparse_indices, buildInputs sparse_values,
                        buildInputs sparse_shape]
      buildOp [] (opDef "AddSparseToTensorsMap"
                  & opAttr "T" .~ tensorType (undefined :: t)
                  & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "sparse_indices" type: DT_INT64 }
input_arg { name: "sparse_values" type_attr: "T" }
input_arg { name: "sparse_shape" type: DT_INT64 }
output_arg { name: "sparse_handle" type: DT_INT64 }
attr { name: "T" type: "type" }
attr { name: "container" type: "string" default_value { s: "" } }
attr { name: "shared_name" type: "string" default_value { s: "" } }
-}
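
-- Usage sketch (hand-written note, not part of the generated bindings):
-- the @container@ and @shared_name@ attrs of the two *SparseToTensorsMap
-- ops default to "" and are not positional arguments; they can be supplied
-- through the primed variants (assuming string attrs are set as
-- ByteString values, as elsewhere in these bindings):
--
-- > handle <- addSparseToTensorsMap'
-- >               (opAttr "shared_name" .~ ("my_map" :: ByteString))
-- >               indices values shape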
-- |
addV2 :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                     (Data.Complex.Complex Float),
                                     Data.Int.Int16, Data.Int.Int32,
                                     Data.Int.Int64, Data.Int.Int8,
                                     Data.Word.Word16, Data.Word.Word8,
                                     Double, Float] t)
      => Tensor v'1 t -- ^ __x__
      -> Tensor v'2 t -- ^ __y__
      -> Tensor Build t -- ^ __z__
addV2 = addV2' id
addV2' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                      (Data.Complex.Complex Float),
                                      Data.Int.Int16, Data.Int.Int32,
                                      Data.Int.Int64, Data.Int.Int8,
                                      Data.Word.Word16, Data.Word.Word8,
                                      Double, Float] t)
       => OpParams
       -> Tensor v'1 t -- ^ __x__
       -> Tensor v'2 t -- ^ __y__
       -> Tensor Build t -- ^ __z__
addV2' op'options x y | eqLengthGuard [] = pureOp [] $ do
    op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x,
                                                         buildInputs y]
    return (opDef "AddV2"
            & opAttr "T" .~ tensorType (undefined :: t)
            & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "x" type_attr: "T" }
input_arg { name: "y" type_attr: "T" }
output_arg { name: "z" type_attr: "T" }
attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } }
-}

-- |
adjustContrast :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16,
                                                      Data.Int.Int32,
                                                      Data.Int.Int64,
                                                      Data.Int.Int8,
                                                      Data.Word.Word8, Double,
                                                      Float] t)
               => Tensor v'1 t -- ^ __images__
               -> Tensor v'2 Float -- ^ __contrast_factor__
               -> Tensor v'3 Float -- ^ __min_value__
               -> Tensor v'4 Float -- ^ __max_value__
               -> Tensor Build Float -- ^ __output__
adjustContrast = adjustContrast' id
adjustContrast' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16,
                                                       Data.Int.Int32,
                                                       Data.Int.Int64,
                                                       Data.Int.Int8,
                                                       Data.Word.Word8, Double,
                                                       Float] t)
                => OpParams
                -> Tensor v'1 t -- ^ __images__
                -> Tensor v'2 Float -- ^ __contrast_factor__
                -> Tensor v'3 Float -- ^ __min_value__
                -> Tensor v'4 Float -- ^ __max_value__
                -> Tensor Build Float -- ^ __output__
adjustContrast' op'options images contrast_factor min_value max_value
  | eqLengthGuard [] = pureOp [] $ do
      op'inputs <- fmap Prelude.concat $ Prelude.sequence
                       [buildInputs images, buildInputs contrast_factor,
                        buildInputs min_value, buildInputs max_value]
      return (opDef "AdjustContrast"
              & opAttr "T" .~ tensorType (undefined :: t)
              & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "images" type_attr: "T" }
input_arg { name: "contrast_factor" type: DT_FLOAT }
input_arg { name: "min_value" type: DT_FLOAT }
input_arg { name: "max_value" type: DT_FLOAT }
output_arg { name: "output" type: DT_FLOAT }
attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE } } }
-}

-- |
adjustContrastv2 :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Float] t)
                 => Tensor v'1 t -- ^ __images__
                 -> Tensor v'2 Float -- ^ __contrast_factor__
                 -> Tensor Build t -- ^ __output__
adjustContrastv2 = adjustContrastv2' id
adjustContrastv2' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Float] t)
                  => OpParams
                  -> Tensor v'1 t -- ^ __images__
                  -> Tensor v'2 Float -- ^ __contrast_factor__
                  -> Tensor Build t -- ^ __output__
adjustContrastv2' op'options images contrast_factor
  | eqLengthGuard [] = pureOp [] $ do
      op'inputs <- fmap Prelude.concat $ Prelude.sequence
                       [buildInputs images, buildInputs contrast_factor]
      return (opDef "AdjustContrastv2"
              & opAttr "T" .~ tensorType (undefined :: t)
              & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "images" type_attr: "T" }
input_arg { name: "contrast_factor" type: DT_FLOAT }
output_arg { name: "output" type_attr: "T" }
attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_HALF type: DT_FLOAT } } }
-}

-- |
adjustHue :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Float] t)
          => Tensor v'1 t -- ^ __images__
          -> Tensor v'2 Float -- ^ __delta__
          -> Tensor Build t -- ^ __output__
adjustHue = adjustHue' id
adjustHue' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Float] t)
           => OpParams
           -> Tensor v'1 t -- ^ __images__
           -> Tensor v'2 Float -- ^ __delta__
           -> Tensor Build t -- ^ __output__
adjustHue' op'options images delta | eqLengthGuard [] = pureOp [] $ do
    op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images,
                                                         buildInputs delta]
    return (opDef "AdjustHue"
            & opAttr "T" .~ tensorType (undefined :: t)
            & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "images" type_attr: "T" }
input_arg { name: "delta" type: DT_FLOAT }
output_arg { name: "output" type_attr: "T" }
attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_HALF type: DT_FLOAT } } }
-}
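
-- Usage sketch (hand-written note, not part of the generated bindings):
-- the image-adjustment ops pair an image tensor with scalar Float controls,
-- e.g., assuming 'TensorFlow.Ops.scalar' from the companion package:
--
-- > shifted = adjustHue images (scalar 0.1)
-- > dimmer  = adjustContrastv2 images (scalar 0.5)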
-- |
adjustSaturation :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Float] t)
                 => Tensor v'1 t -- ^ __images__
                 -> Tensor v'2 Float -- ^ __scale__
                 -> Tensor Build t -- ^ __output__
adjustSaturation = adjustSaturation' id
adjustSaturation' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Float] t)
                  => OpParams
                  -> Tensor v'1 t -- ^ __images__
                  -> Tensor v'2 Float -- ^ __scale__
                  -> Tensor Build t -- ^ __output__
adjustSaturation' op'options images scale | eqLengthGuard [] = pureOp [] $ do
    op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images,
                                                         buildInputs scale]
    return (opDef "AdjustSaturation"
            & opAttr "T" .~ tensorType (undefined :: t)
            & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "images" type_attr: "T" }
input_arg { name: "scale" type: DT_FLOAT }
output_arg { name: "output" type_attr: "T" }
attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_HALF type: DT_FLOAT } } }
-}

-- |
all :: forall v'1 v'2 tidx . (OneOf '[Data.Int.Int32, Data.Int.Int64] tidx)
    => Tensor v'1 Bool -- ^ __input__
    -> Tensor v'2 tidx -- ^ __reduction_indices__
    -> Tensor Build Bool -- ^ __output__
all = all' id
all' :: forall v'1 v'2 tidx . (OneOf '[Data.Int.Int32, Data.Int.Int64] tidx)
     => OpParams
     -> Tensor v'1 Bool -- ^ __input__
     -> Tensor v'2 tidx -- ^ __reduction_indices__
     -> Tensor Build Bool -- ^ __output__
all' op'options input reduction_indices | eqLengthGuard [] = pureOp [] $ do
    op'inputs <- fmap Prelude.concat $ Prelude.sequence
                     [buildInputs input, buildInputs reduction_indices]
    return (opDef "All"
            & opAttr "Tidx" .~ tensorType (undefined :: tidx)
            & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "input" type: DT_BOOL }
input_arg { name: "reduction_indices" type_attr: "Tidx" }
output_arg { name: "output" type: DT_BOOL }
attr { name: "keep_dims" type: "bool" default_value { b: false } }
attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } }
-}
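
-- Usage sketch (hand-written note, not part of the generated bindings):
-- the @keep_dims@ attr (default false) is only reachable through
-- 'OpParams', again assuming 'TensorFlow.Ops.scalar':
--
-- > allKeep :: Tensor Build Bool -> Tensor Build Bool
-- > allKeep t = all' (opAttr "keep_dims" .~ True) t
-- >                  (scalar (0 :: Data.Int.Int32))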
-- |
allCandidateSampler :: forall v'1 m' . (MonadBuild m')
                    => Data.Int.Int64 -- ^ __num_sampled__
                    -> Data.Int.Int64 -- ^ __num_true__
                    -> Bool -- ^ __unique__
                    -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__
                    -> m' ((Tensor Value Data.Int.Int64, Tensor Value Float,
                            Tensor Value Float))
                    -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
                    --
                    -- * __sampled_candidates__
                    --
                    -- * __true_expected_count__
                    --
                    -- * __sampled_expected_count__
allCandidateSampler = allCandidateSampler' id
allCandidateSampler' :: forall v'1 m' . (MonadBuild m')
                     => OpParams
                     -> Data.Int.Int64 -- ^ __num_sampled__
                     -> Data.Int.Int64 -- ^ __num_true__
                     -> Bool -- ^ __unique__
                     -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__
                     -> m' ((Tensor Value Data.Int.Int64, Tensor Value Float,
                             Tensor Value Float))
                     -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__)
                     --
                     -- * __sampled_candidates__
                     --
                     -- * __true_expected_count__
                     --
                     -- * __sampled_expected_count__
allCandidateSampler' op'options num_sampled num_true unique true_classes
  | eqLengthGuard [] = build $ do
      op'inputs <- fmap Prelude.concat $ Prelude.sequence
                       [buildInputs true_classes]
      buildOp [] (opDef "AllCandidateSampler"
                  & opAttr "num_sampled" .~ num_sampled
                  & opAttr "num_true" .~ num_true
                  & opAttr "unique" .~ unique
                  & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "true_classes" type: DT_INT64 }
output_arg { name: "sampled_candidates" type: DT_INT64 }
output_arg { name: "true_expected_count" type: DT_FLOAT }
output_arg { name: "sampled_expected_count" type: DT_FLOAT }
attr { name: "num_true" type: "int" has_minimum: true minimum: 1 }
attr { name: "num_sampled" type: "int" has_minimum: true minimum: 1 }
attr { name: "unique" type: "bool" }
attr { name: "seed" type: "int" default_value { i: 0 } }
attr { name: "seed2" type: "int" default_value { i: 0 } }
-}
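
-- Usage sketch (hand-written note, not part of the generated bindings):
-- multi-output ops return tuples; the sampler's @seed@/@seed2@ attrs
-- (default 0) go through 'OpParams':
--
-- > (sampled, trueEc, sampledEc) <-
-- >     allCandidateSampler' (opAttr "seed" .~ (42 :: Data.Int.Int64))
-- >                          numSampled numTrue unique trueClasses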
-- |
allToAll :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                        (Data.Complex.Complex Float), Bool,
                                        Data.Int.Int16, Data.Int.Int32,
                                        Data.Int.Int64, Data.Int.Int8,
                                        Data.Word.Word16, Data.Word.Word32,
                                        Data.Word.Word64, Data.Word.Word8,
                                        Double, Float] t)
         => Data.Int.Int64 -- ^ __concat_dimension__
         -> Data.Int.Int64 -- ^ __split_count__
         -> Data.Int.Int64 -- ^ __split_dimension__
         -> Tensor v'1 t -- ^ __input__
         -> Tensor v'2 Data.Int.Int32 -- ^ __group_assignment__
         -> Tensor Build t -- ^ __output__
allToAll = allToAll' id
allToAll' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double),
                                         (Data.Complex.Complex Float), Bool,
                                         Data.Int.Int16, Data.Int.Int32,
                                         Data.Int.Int64, Data.Int.Int8,
                                         Data.Word.Word16, Data.Word.Word32,
                                         Data.Word.Word64, Data.Word.Word8,
                                         Double, Float] t)
          => OpParams
          -> Data.Int.Int64 -- ^ __concat_dimension__
          -> Data.Int.Int64 -- ^ __split_count__
          -> Data.Int.Int64 -- ^ __split_dimension__
          -> Tensor v'1 t -- ^ __input__
          -> Tensor v'2 Data.Int.Int32 -- ^ __group_assignment__
          -> Tensor Build t -- ^ __output__
allToAll' op'options concat_dimension split_count split_dimension input
          group_assignment | eqLengthGuard [] = pureOp [] $ do
    op'inputs <- fmap Prelude.concat $ Prelude.sequence
                     [buildInputs input, buildInputs group_assignment]
    return (opDef "AllToAll"
            & opAttr "T" .~ tensorType (undefined :: t)
            & opAttr "concat_dimension" .~ concat_dimension
            & opAttr "split_count" .~ split_count
            & opAttr "split_dimension" .~ split_dimension
            & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "input" type_attr: "T" }
input_arg { name: "group_assignment" type: DT_INT32 }
output_arg { name: "output" type_attr: "T" }
attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 type: DT_BOOL } } }
attr { name: "concat_dimension" type: "int" }
attr { name: "split_dimension" type: "int" }
attr { name: "split_count" type: "int" }
-}

-- |
angle :: forall v'1 t tout . (OneOf '[(Data.Complex.Complex Double),
                                      (Data.Complex.Complex Float)] t,
                              OneOf '[Double, Float] tout)
      => Tensor v'1 t -- ^ __input__
      -> Tensor Build tout -- ^ __output__
angle = angle' id
angle' :: forall v'1 t tout . (OneOf '[(Data.Complex.Complex Double),
                                       (Data.Complex.Complex Float)] t,
                               OneOf '[Double, Float] tout)
       => OpParams
       -> Tensor v'1 t -- ^ __input__
       -> Tensor Build tout -- ^ __output__
angle' op'options input | eqLengthGuard [] = pureOp [] $ do
    op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input]
    return (opDef "Angle"
            & opAttr "T" .~ tensorType (undefined :: t)
            & opAttr "Tout" .~ tensorType (undefined :: tout)
            & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "input" type_attr: "T" }
output_arg { name: "output" type_attr: "Tout" }
attr { name: "T" type: "type" default_value { type: DT_COMPLEX64 } allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } } }
attr { name: "Tout" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } }
-}

-- |
anonymousIterator :: forall m' . (MonadBuild m')
                  => [DataType] -- ^ __output_types__
                  -> m' (Tensor Value ResourceHandle) -- ^ __handle__
anonymousIterator = anonymousIterator' id
anonymousIterator' :: forall m' . (MonadBuild m')
                   => OpParams
                   -> [DataType] -- ^ __output_types__
                   -> m' (Tensor Value ResourceHandle) -- ^ __handle__
anonymousIterator' op'options output_types | eqLengthGuard [] = build $ do
    op'inputs <- fmap Prelude.concat $ Prelude.sequence []
    buildOp [] (opDef "AnonymousIterator"
                & opAttr "output_types" .~ output_types
                & op'options & opInputs .~ op'inputs)
{-
output_arg { name: "handle" type: DT_RESOURCE }
attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 }
attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 }
-}

-- |
anonymousIteratorV2 :: forall m' . (MonadBuild m')
                    => [DataType] -- ^ __output_types__
                    -> m' ((Tensor Value ResourceHandle, Tensor Value Variant))
                    -- ^ (__handle__, __deleter__)
                    --
                    -- * __handle__
                    --
                    -- * __deleter__
anonymousIteratorV2 = anonymousIteratorV2' id
anonymousIteratorV2' :: forall m' . (MonadBuild m')
                     => OpParams
                     -> [DataType] -- ^ __output_types__
                     -> m' ((Tensor Value ResourceHandle, Tensor Value Variant))
                     -- ^ (__handle__, __deleter__)
                     --
                     -- * __handle__
                     --
                     -- * __deleter__
anonymousIteratorV2' op'options output_types | eqLengthGuard [] = build $ do
    op'inputs <- fmap Prelude.concat $ Prelude.sequence []
    buildOp [] (opDef "AnonymousIteratorV2"
                & opAttr "output_types" .~ output_types
                & op'options & opInputs .~ op'inputs)
{-
output_arg { name: "handle" type: DT_RESOURCE }
output_arg { name: "deleter" type: DT_VARIANT }
attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 }
attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 }
-}
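
-- Usage sketch (hand-written note, not part of the generated bindings):
-- the iterator constructors expose only @output_types@ positionally, while
-- the proto also lists an @output_shapes@ attr; when the runtime requires
-- it, it would be supplied via 'OpParams' (assuming an 'Attribute' instance
-- for @[Shape]@):
--
-- > (handle, deleter) <- anonymousIteratorV2'
-- >     (opAttr "output_shapes" .~ [Shape [-1]])
-- >     [DT_FLOAT]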
-- |
any :: forall v'1 v'2 tidx . (OneOf '[Data.Int.Int32, Data.Int.Int64] tidx)
    => Tensor v'1 Bool -- ^ __input__
    -> Tensor v'2 tidx -- ^ __reduction_indices__
    -> Tensor Build Bool -- ^ __output__
any = any' id
any' :: forall v'1 v'2 tidx . (OneOf '[Data.Int.Int32, Data.Int.Int64] tidx)
     => OpParams
     -> Tensor v'1 Bool -- ^ __input__
     -> Tensor v'2 tidx -- ^ __reduction_indices__
     -> Tensor Build Bool -- ^ __output__
any' op'options input reduction_indices | eqLengthGuard [] = pureOp [] $ do
    op'inputs <- fmap Prelude.concat $ Prelude.sequence
                     [buildInputs input, buildInputs reduction_indices]
    return (opDef "Any"
            & opAttr "Tidx" .~ tensorType (undefined :: tidx)
            & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "input" type: DT_BOOL }
input_arg { name: "reduction_indices" type_attr: "Tidx" }
output_arg { name: "output" type: DT_BOOL }
attr { name: "keep_dims" type: "bool" default_value { b: false } }
attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } }
-}

-- |
applyAdaMax :: forall v'4 v'5 v'6 v'7 v'8 v'9 t m' .
               (MonadBuild m',
                OneOf '[(Data.Complex.Complex Double),
                        (Data.Complex.Complex Float), Data.Int.Int16,
                        Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                        Data.Word.Word16, Data.Word.Word32, Data.Word.Word64,
                        Data.Word.Word8, Double, Float] t)
            => Tensor Ref t -- ^ __var__
            -> Tensor Ref t -- ^ __m__
            -> Tensor Ref t -- ^ __v__
            -> Tensor v'4 t -- ^ __beta1_power__
            -> Tensor v'5 t -- ^ __lr__
            -> Tensor v'6 t -- ^ __beta1__
            -> Tensor v'7 t -- ^ __beta2__
            -> Tensor v'8 t -- ^ __epsilon__
            -> Tensor v'9 t -- ^ __grad__
            -> m' (Tensor Ref t) -- ^ __out__
applyAdaMax = applyAdaMax' id
applyAdaMax' :: forall v'4 v'5 v'6 v'7 v'8 v'9 t m' .
                (MonadBuild m',
                 OneOf '[(Data.Complex.Complex Double),
                         (Data.Complex.Complex Float), Data.Int.Int16,
                         Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                         Data.Word.Word16, Data.Word.Word32, Data.Word.Word64,
                         Data.Word.Word8, Double, Float] t)
             => OpParams
             -> Tensor Ref t -- ^ __var__
             -> Tensor Ref t -- ^ __m__
             -> Tensor Ref t -- ^ __v__
             -> Tensor v'4 t -- ^ __beta1_power__
             -> Tensor v'5 t -- ^ __lr__
             -> Tensor v'6 t -- ^ __beta1__
             -> Tensor v'7 t -- ^ __beta2__
             -> Tensor v'8 t -- ^ __epsilon__
             -> Tensor v'9 t -- ^ __grad__
             -> m' (Tensor Ref t) -- ^ __out__
applyAdaMax' op'options var m v beta1_power lr beta1 beta2 epsilon grad
  | eqLengthGuard [] = build $ do
      op'inputs <- fmap Prelude.concat $ Prelude.sequence
                       [buildInputs var, buildInputs m, buildInputs v,
                        buildInputs beta1_power, buildInputs lr,
                        buildInputs beta1, buildInputs beta2,
                        buildInputs epsilon, buildInputs grad]
      buildOp [] (opDef "ApplyAdaMax"
                  & opAttr "T" .~ tensorType (undefined :: t)
                  & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "var" type_attr: "T" is_ref: true }
input_arg { name: "m" type_attr: "T" is_ref: true }
input_arg { name: "v" type_attr: "T" is_ref: true }
input_arg { name: "beta1_power" type_attr: "T" }
input_arg { name: "lr" type_attr: "T" }
input_arg { name: "beta1" type_attr: "T" }
input_arg { name: "beta2" type_attr: "T" }
input_arg { name: "epsilon" type_attr: "T" }
input_arg { name: "grad" type_attr: "T" }
output_arg { name: "out" type_attr: "T" is_ref: true }
attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } }
attr { name: "use_locking" type: "bool" default_value { b: false } }
-}
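
-- Usage sketch (hand-written note, not part of the generated bindings):
-- the apply* family updates Ref-typed variables in place inside a build
-- monad; @use_locking@ (default false) is attr-only.  E.g. plain SGD with
-- 'applyGradientDescent' (defined below), with the variable assumed to come
-- from 'TensorFlow.Ops.initializedVariable':
--
-- > var <- initializedVariable (constant (Shape [1]) [0 :: Float])
-- > _   <- applyGradientDescent' (opAttr "use_locking" .~ True)
-- >                              var (scalar 0.01) grad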
-- |
applyAdadelta :: forall v'4 v'5 v'6 v'7 t m' .
                 (MonadBuild m',
                  OneOf '[(Data.Complex.Complex Double),
                          (Data.Complex.Complex Float), Data.Int.Int16,
                          Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                          Data.Word.Word16, Data.Word.Word32,
                          Data.Word.Word64, Data.Word.Word8, Double, Float] t)
              => Tensor Ref t -- ^ __var__
              -> Tensor Ref t -- ^ __accum__
              -> Tensor Ref t -- ^ __accum_update__
              -> Tensor v'4 t -- ^ __lr__
              -> Tensor v'5 t -- ^ __rho__
              -> Tensor v'6 t -- ^ __epsilon__
              -> Tensor v'7 t -- ^ __grad__
              -> m' (Tensor Ref t) -- ^ __out__
applyAdadelta = applyAdadelta' id
applyAdadelta' :: forall v'4 v'5 v'6 v'7 t m' .
                  (MonadBuild m',
                   OneOf '[(Data.Complex.Complex Double),
                           (Data.Complex.Complex Float), Data.Int.Int16,
                           Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                           Data.Word.Word16, Data.Word.Word32,
                           Data.Word.Word64, Data.Word.Word8, Double,
                           Float] t)
               => OpParams
               -> Tensor Ref t -- ^ __var__
               -> Tensor Ref t -- ^ __accum__
               -> Tensor Ref t -- ^ __accum_update__
               -> Tensor v'4 t -- ^ __lr__
               -> Tensor v'5 t -- ^ __rho__
               -> Tensor v'6 t -- ^ __epsilon__
               -> Tensor v'7 t -- ^ __grad__
               -> m' (Tensor Ref t) -- ^ __out__
applyAdadelta' op'options var accum accum_update lr rho epsilon grad
  | eqLengthGuard [] = build $ do
      op'inputs <- fmap Prelude.concat $ Prelude.sequence
                       [buildInputs var, buildInputs accum,
                        buildInputs accum_update, buildInputs lr,
                        buildInputs rho, buildInputs epsilon,
                        buildInputs grad]
      buildOp [] (opDef "ApplyAdadelta"
                  & opAttr "T" .~ tensorType (undefined :: t)
                  & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "var" type_attr: "T" is_ref: true }
input_arg { name: "accum" type_attr: "T" is_ref: true }
input_arg { name: "accum_update" type_attr: "T" is_ref: true }
input_arg { name: "lr" type_attr: "T" }
input_arg { name: "rho" type_attr: "T" }
input_arg { name: "epsilon" type_attr: "T" }
input_arg { name: "grad" type_attr: "T" }
output_arg { name: "out" type_attr: "T" is_ref: true }
attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } }
attr { name: "use_locking" type: "bool" default_value { b: false } }
-}
-- |
applyAdagrad :: forall v'3 v'4 t m' .
                (MonadBuild m',
                 OneOf '[(Data.Complex.Complex Double),
                         (Data.Complex.Complex Float), Data.Int.Int16,
                         Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                         Data.Word.Word16, Data.Word.Word32, Data.Word.Word64,
                         Data.Word.Word8, Double, Float] t)
             => Tensor Ref t -- ^ __var__
             -> Tensor Ref t -- ^ __accum__
             -> Tensor v'3 t -- ^ __lr__
             -> Tensor v'4 t -- ^ __grad__
             -> m' (Tensor Ref t) -- ^ __out__
applyAdagrad = applyAdagrad' id
applyAdagrad' :: forall v'3 v'4 t m' .
                 (MonadBuild m',
                  OneOf '[(Data.Complex.Complex Double),
                          (Data.Complex.Complex Float), Data.Int.Int16,
                          Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                          Data.Word.Word16, Data.Word.Word32,
                          Data.Word.Word64, Data.Word.Word8, Double, Float] t)
              => OpParams
              -> Tensor Ref t -- ^ __var__
              -> Tensor Ref t -- ^ __accum__
              -> Tensor v'3 t -- ^ __lr__
              -> Tensor v'4 t -- ^ __grad__
              -> m' (Tensor Ref t) -- ^ __out__
applyAdagrad' op'options var accum lr grad | eqLengthGuard [] = build $ do
    op'inputs <- fmap Prelude.concat $ Prelude.sequence
                     [buildInputs var, buildInputs accum, buildInputs lr,
                      buildInputs grad]
    buildOp [] (opDef "ApplyAdagrad"
                & opAttr "T" .~ tensorType (undefined :: t)
                & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "var" type_attr: "T" is_ref: true }
input_arg { name: "accum" type_attr: "T" is_ref: true }
input_arg { name: "lr" type_attr: "T" }
input_arg { name: "grad" type_attr: "T" }
output_arg { name: "out" type_attr: "T" is_ref: true }
attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } }
attr { name: "use_locking" type: "bool" default_value { b: false } }
attr { name: "update_slots" type: "bool" default_value { b: true } }
-}
-- |
applyAdagradDA :: forall v'4 v'5 v'6 v'7 v'8 t m' .
                  (MonadBuild m',
                   OneOf '[(Data.Complex.Complex Double),
                           (Data.Complex.Complex Float), Data.Int.Int16,
                           Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                           Data.Word.Word16, Data.Word.Word32,
                           Data.Word.Word64, Data.Word.Word8, Double,
                           Float] t)
               => Tensor Ref t -- ^ __var__
               -> Tensor Ref t -- ^ __gradient_accumulator__
               -> Tensor Ref t -- ^ __gradient_squared_accumulator__
               -> Tensor v'4 t -- ^ __grad__
               -> Tensor v'5 t -- ^ __lr__
               -> Tensor v'6 t -- ^ __l1__
               -> Tensor v'7 t -- ^ __l2__
               -> Tensor v'8 Data.Int.Int64 -- ^ __global_step__
               -> m' (Tensor Ref t) -- ^ __out__
applyAdagradDA = applyAdagradDA' id
applyAdagradDA' :: forall v'4 v'5 v'6 v'7 v'8 t m' .
                   (MonadBuild m',
                    OneOf '[(Data.Complex.Complex Double),
                            (Data.Complex.Complex Float), Data.Int.Int16,
                            Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                            Data.Word.Word16, Data.Word.Word32,
                            Data.Word.Word64, Data.Word.Word8, Double,
                            Float] t)
                => OpParams
                -> Tensor Ref t -- ^ __var__
                -> Tensor Ref t -- ^ __gradient_accumulator__
                -> Tensor Ref t -- ^ __gradient_squared_accumulator__
                -> Tensor v'4 t -- ^ __grad__
                -> Tensor v'5 t -- ^ __lr__
                -> Tensor v'6 t -- ^ __l1__
                -> Tensor v'7 t -- ^ __l2__
                -> Tensor v'8 Data.Int.Int64 -- ^ __global_step__
                -> m' (Tensor Ref t) -- ^ __out__
applyAdagradDA' op'options var gradient_accumulator
                gradient_squared_accumulator grad lr l1 l2 global_step
  | eqLengthGuard [] = build $ do
      op'inputs <- fmap Prelude.concat $ Prelude.sequence
                       [buildInputs var, buildInputs gradient_accumulator,
                        buildInputs gradient_squared_accumulator,
                        buildInputs grad, buildInputs lr, buildInputs l1,
                        buildInputs l2, buildInputs global_step]
      buildOp [] (opDef "ApplyAdagradDA"
                  & opAttr "T" .~ tensorType (undefined :: t)
                  & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "var" type_attr: "T" is_ref: true }
input_arg { name: "gradient_accumulator" type_attr: "T" is_ref: true }
input_arg { name: "gradient_squared_accumulator" type_attr: "T" is_ref: true }
input_arg { name: "grad" type_attr: "T" }
input_arg { name: "lr" type_attr: "T" }
input_arg { name: "l1" type_attr: "T" }
input_arg { name: "l2" type_attr: "T" }
input_arg { name: "global_step" type: DT_INT64 }
output_arg { name: "out" type_attr: "T" is_ref: true }
attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } }
attr { name: "use_locking" type: "bool" default_value { b: false } }
-}
-- |
applyAdam :: forall v'4 v'5 v'6 v'7 v'8 v'9 v'10 t m' .
             (MonadBuild m',
              OneOf '[(Data.Complex.Complex Double),
                      (Data.Complex.Complex Float), Data.Int.Int16,
                      Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                      Data.Word.Word16, Data.Word.Word32, Data.Word.Word64,
                      Data.Word.Word8, Double, Float] t)
          => Tensor Ref t -- ^ __var__
          -> Tensor Ref t -- ^ __m__
          -> Tensor Ref t -- ^ __v__
          -> Tensor v'4 t -- ^ __beta1_power__
          -> Tensor v'5 t -- ^ __beta2_power__
          -> Tensor v'6 t -- ^ __lr__
          -> Tensor v'7 t -- ^ __beta1__
          -> Tensor v'8 t -- ^ __beta2__
          -> Tensor v'9 t -- ^ __epsilon__
          -> Tensor v'10 t -- ^ __grad__
          -> m' (Tensor Ref t) -- ^ __out__
applyAdam = applyAdam' id
applyAdam' :: forall v'4 v'5 v'6 v'7 v'8 v'9 v'10 t m' .
              (MonadBuild m',
               OneOf '[(Data.Complex.Complex Double),
                       (Data.Complex.Complex Float), Data.Int.Int16,
                       Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                       Data.Word.Word16, Data.Word.Word32, Data.Word.Word64,
                       Data.Word.Word8, Double, Float] t)
           => OpParams
           -> Tensor Ref t -- ^ __var__
           -> Tensor Ref t -- ^ __m__
           -> Tensor Ref t -- ^ __v__
           -> Tensor v'4 t -- ^ __beta1_power__
           -> Tensor v'5 t -- ^ __beta2_power__
           -> Tensor v'6 t -- ^ __lr__
           -> Tensor v'7 t -- ^ __beta1__
           -> Tensor v'8 t -- ^ __beta2__
           -> Tensor v'9 t -- ^ __epsilon__
           -> Tensor v'10 t -- ^ __grad__
           -> m' (Tensor Ref t) -- ^ __out__
applyAdam' op'options var m v beta1_power beta2_power lr beta1 beta2 epsilon
           grad | eqLengthGuard [] = build $ do
    op'inputs <- fmap Prelude.concat $ Prelude.sequence
                     [buildInputs var, buildInputs m, buildInputs v,
                      buildInputs beta1_power, buildInputs beta2_power,
                      buildInputs lr, buildInputs beta1, buildInputs beta2,
                      buildInputs epsilon, buildInputs grad]
    buildOp [] (opDef "ApplyAdam"
                & opAttr "T" .~ tensorType (undefined :: t)
                & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "var" type_attr: "T" is_ref: true }
input_arg { name: "m" type_attr: "T" is_ref: true }
input_arg { name: "v" type_attr: "T" is_ref: true }
input_arg { name: "beta1_power" type_attr: "T" }
input_arg { name: "beta2_power" type_attr: "T" }
input_arg { name: "lr" type_attr: "T" }
input_arg { name: "beta1" type_attr: "T" }
input_arg { name: "beta2" type_attr: "T" }
input_arg { name: "epsilon" type_attr: "T" }
input_arg { name: "grad" type_attr: "T" }
output_arg { name: "out" type_attr: "T" is_ref: true }
attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } }
attr { name: "use_locking" type: "bool" default_value { b: false } }
attr { name: "use_nesterov" type: "bool" default_value { b: false } }
-}
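
-- Usage sketch (hand-written note, not part of the generated bindings):
-- 'applyAdam' takes its hyper-parameters as scalar tensors; @use_nesterov@
-- (default false) is attr-only, per the proto above:
--
-- > _ <- applyAdam' (opAttr "use_nesterov" .~ True)
-- >                 var m v beta1Power beta2Power
-- >                 (scalar 1e-3) (scalar 0.9) (scalar 0.999) (scalar 1e-8)
-- >                 grad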
-- |
applyAddSign :: forall v'3 v'4 v'5 v'6 v'7 t m' .
                (MonadBuild m',
                 OneOf '[(Data.Complex.Complex Double),
                         (Data.Complex.Complex Float), Data.Int.Int16,
                         Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                         Data.Word.Word16, Data.Word.Word32, Data.Word.Word64,
                         Data.Word.Word8, Double, Float] t)
             => Tensor Ref t -- ^ __var__
             -> Tensor Ref t -- ^ __m__
             -> Tensor v'3 t -- ^ __lr__
             -> Tensor v'4 t -- ^ __alpha__
             -> Tensor v'5 t -- ^ __sign_decay__
             -> Tensor v'6 t -- ^ __beta__
             -> Tensor v'7 t -- ^ __grad__
             -> m' (Tensor Ref t) -- ^ __out__
applyAddSign = applyAddSign' id
applyAddSign' :: forall v'3 v'4 v'5 v'6 v'7 t m' .
                 (MonadBuild m',
                  OneOf '[(Data.Complex.Complex Double),
                          (Data.Complex.Complex Float), Data.Int.Int16,
                          Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                          Data.Word.Word16, Data.Word.Word32,
                          Data.Word.Word64, Data.Word.Word8, Double, Float] t)
              => OpParams
              -> Tensor Ref t -- ^ __var__
              -> Tensor Ref t -- ^ __m__
              -> Tensor v'3 t -- ^ __lr__
              -> Tensor v'4 t -- ^ __alpha__
              -> Tensor v'5 t -- ^ __sign_decay__
              -> Tensor v'6 t -- ^ __beta__
              -> Tensor v'7 t -- ^ __grad__
              -> m' (Tensor Ref t) -- ^ __out__
applyAddSign' op'options var m lr alpha sign_decay beta grad
  | eqLengthGuard [] = build $ do
      op'inputs <- fmap Prelude.concat $ Prelude.sequence
                       [buildInputs var, buildInputs m, buildInputs lr,
                        buildInputs alpha, buildInputs sign_decay,
                        buildInputs beta, buildInputs grad]
      buildOp [] (opDef "ApplyAddSign"
                  & opAttr "T" .~ tensorType (undefined :: t)
                  & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "var" type_attr: "T" is_ref: true }
input_arg { name: "m" type_attr: "T" is_ref: true }
input_arg { name: "lr" type_attr: "T" }
input_arg { name: "alpha" type_attr: "T" }
input_arg { name: "sign_decay" type_attr: "T" }
input_arg { name: "beta" type_attr: "T" }
input_arg { name: "grad" type_attr: "T" }
output_arg { name: "out" type_attr: "T" is_ref: true }
attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } }
attr { name: "use_locking" type: "bool" default_value { b: false } }
-}
-- |
applyCenteredRMSProp :: forall v'5 v'6 v'7 v'8 v'9 t m' .
                        (MonadBuild m',
                         OneOf '[(Data.Complex.Complex Double),
                                 (Data.Complex.Complex Float), Data.Int.Int16,
                                 Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                                 Data.Word.Word16, Data.Word.Word32,
                                 Data.Word.Word64, Data.Word.Word8, Double,
                                 Float] t)
                     => Tensor Ref t -- ^ __var__
                     -> Tensor Ref t -- ^ __mg__
                     -> Tensor Ref t -- ^ __ms__
                     -> Tensor Ref t -- ^ __mom__
                     -> Tensor v'5 t -- ^ __lr__
                     -> Tensor v'6 t -- ^ __rho__
                     -> Tensor v'7 t -- ^ __momentum__
                     -> Tensor v'8 t -- ^ __epsilon__
                     -> Tensor v'9 t -- ^ __grad__
                     -> m' (Tensor Ref t) -- ^ __out__
applyCenteredRMSProp = applyCenteredRMSProp' id
applyCenteredRMSProp' :: forall v'5 v'6 v'7 v'8 v'9 t m' .
                         (MonadBuild m',
                          OneOf '[(Data.Complex.Complex Double),
                                  (Data.Complex.Complex Float),
                                  Data.Int.Int16, Data.Int.Int32,
                                  Data.Int.Int64, Data.Int.Int8,
                                  Data.Word.Word16, Data.Word.Word32,
                                  Data.Word.Word64, Data.Word.Word8, Double,
                                  Float] t)
                      => OpParams
                      -> Tensor Ref t -- ^ __var__
                      -> Tensor Ref t -- ^ __mg__
                      -> Tensor Ref t -- ^ __ms__
                      -> Tensor Ref t -- ^ __mom__
                      -> Tensor v'5 t -- ^ __lr__
                      -> Tensor v'6 t -- ^ __rho__
                      -> Tensor v'7 t -- ^ __momentum__
                      -> Tensor v'8 t -- ^ __epsilon__
                      -> Tensor v'9 t -- ^ __grad__
                      -> m' (Tensor Ref t) -- ^ __out__
applyCenteredRMSProp' op'options var mg ms mom lr rho momentum epsilon grad
  | eqLengthGuard [] = build $ do
      op'inputs <- fmap Prelude.concat $ Prelude.sequence
                       [buildInputs var, buildInputs mg, buildInputs ms,
                        buildInputs mom, buildInputs lr, buildInputs rho,
                        buildInputs momentum, buildInputs epsilon,
                        buildInputs grad]
      buildOp [] (opDef "ApplyCenteredRMSProp"
                  & opAttr "T" .~ tensorType (undefined :: t)
                  & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "var" type_attr: "T" is_ref: true }
input_arg { name: "mg" type_attr: "T" is_ref: true }
input_arg { name: "ms" type_attr: "T" is_ref: true }
input_arg { name: "mom" type_attr: "T" is_ref: true }
input_arg { name: "lr" type_attr: "T" }
input_arg { name: "rho" type_attr: "T" }
input_arg { name: "momentum" type_attr: "T" }
input_arg { name: "epsilon" type_attr: "T" }
input_arg { name: "grad" type_attr: "T" }
output_arg { name: "out" type_attr: "T" is_ref: true }
attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } }
attr { name: "use_locking" type: "bool" default_value { b: false } }
-}
-- |
applyFtrl :: forall v'4 v'5 v'6 v'7 v'8 t m' .
             (MonadBuild m',
              OneOf '[(Data.Complex.Complex Double),
                      (Data.Complex.Complex Float), Data.Int.Int16,
                      Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                      Data.Word.Word16, Data.Word.Word32, Data.Word.Word64,
                      Data.Word.Word8, Double, Float] t)
          => Tensor Ref t -- ^ __var__
          -> Tensor Ref t -- ^ __accum__
          -> Tensor Ref t -- ^ __linear__
          -> Tensor v'4 t -- ^ __grad__
          -> Tensor v'5 t -- ^ __lr__
          -> Tensor v'6 t -- ^ __l1__
          -> Tensor v'7 t -- ^ __l2__
          -> Tensor v'8 t -- ^ __lr_power__
          -> m' (Tensor Ref t) -- ^ __out__
applyFtrl = applyFtrl' id
applyFtrl' :: forall v'4 v'5 v'6 v'7 v'8 t m' .
              (MonadBuild m',
               OneOf '[(Data.Complex.Complex Double),
                       (Data.Complex.Complex Float), Data.Int.Int16,
                       Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                       Data.Word.Word16, Data.Word.Word32, Data.Word.Word64,
                       Data.Word.Word8, Double, Float] t)
           => OpParams
           -> Tensor Ref t -- ^ __var__
           -> Tensor Ref t -- ^ __accum__
           -> Tensor Ref t -- ^ __linear__
           -> Tensor v'4 t -- ^ __grad__
           -> Tensor v'5 t -- ^ __lr__
           -> Tensor v'6 t -- ^ __l1__
           -> Tensor v'7 t -- ^ __l2__
           -> Tensor v'8 t -- ^ __lr_power__
           -> m' (Tensor Ref t) -- ^ __out__
applyFtrl' op'options var accum linear grad lr l1 l2 lr_power
  | eqLengthGuard [] = build $ do
      op'inputs <- fmap Prelude.concat $ Prelude.sequence
                       [buildInputs var, buildInputs accum,
                        buildInputs linear, buildInputs grad, buildInputs lr,
                        buildInputs l1, buildInputs l2, buildInputs lr_power]
      buildOp [] (opDef "ApplyFtrl"
                  & opAttr "T" .~ tensorType (undefined :: t)
                  & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "var" type_attr: "T" is_ref: true }
input_arg { name: "accum" type_attr: "T" is_ref: true }
input_arg { name: "linear" type_attr: "T" is_ref: true }
input_arg { name: "grad" type_attr: "T" }
input_arg { name: "lr" type_attr: "T" }
input_arg { name: "l1" type_attr: "T" }
input_arg { name: "l2" type_attr: "T" }
input_arg { name: "lr_power" type_attr: "T" }
output_arg { name: "out" type_attr: "T" is_ref: true }
attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } }
attr { name: "use_locking" type: "bool" default_value { b: false } }
-}
-- |
applyFtrlV2 :: forall v'4 v'5 v'6 v'7 v'8 v'9 t m' .
               (MonadBuild m',
                OneOf '[(Data.Complex.Complex Double),
                        (Data.Complex.Complex Float), Data.Int.Int16,
                        Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                        Data.Word.Word16, Data.Word.Word32, Data.Word.Word64,
                        Data.Word.Word8, Double, Float] t)
            => Tensor Ref t -- ^ __var__
            -> Tensor Ref t -- ^ __accum__
            -> Tensor Ref t -- ^ __linear__
            -> Tensor v'4 t -- ^ __grad__
            -> Tensor v'5 t -- ^ __lr__
            -> Tensor v'6 t -- ^ __l1__
            -> Tensor v'7 t -- ^ __l2__
            -> Tensor v'8 t -- ^ __l2_shrinkage__
            -> Tensor v'9 t -- ^ __lr_power__
            -> m' (Tensor Ref t) -- ^ __out__
applyFtrlV2 = applyFtrlV2' id
applyFtrlV2' :: forall v'4 v'5 v'6 v'7 v'8 v'9 t m' .
                (MonadBuild m',
                 OneOf '[(Data.Complex.Complex Double),
                         (Data.Complex.Complex Float), Data.Int.Int16,
                         Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                         Data.Word.Word16, Data.Word.Word32, Data.Word.Word64,
                         Data.Word.Word8, Double, Float] t)
             => OpParams
             -> Tensor Ref t -- ^ __var__
             -> Tensor Ref t -- ^ __accum__
             -> Tensor Ref t -- ^ __linear__
             -> Tensor v'4 t -- ^ __grad__
             -> Tensor v'5 t -- ^ __lr__
             -> Tensor v'6 t -- ^ __l1__
             -> Tensor v'7 t -- ^ __l2__
             -> Tensor v'8 t -- ^ __l2_shrinkage__
             -> Tensor v'9 t -- ^ __lr_power__
             -> m' (Tensor Ref t) -- ^ __out__
applyFtrlV2' op'options var accum linear grad lr l1 l2 l2_shrinkage lr_power
  | eqLengthGuard [] = build $ do
      op'inputs <- fmap Prelude.concat $ Prelude.sequence
                       [buildInputs var, buildInputs accum,
                        buildInputs linear, buildInputs grad, buildInputs lr,
                        buildInputs l1, buildInputs l2,
                        buildInputs l2_shrinkage, buildInputs lr_power]
      buildOp [] (opDef "ApplyFtrlV2"
                  & opAttr "T" .~ tensorType (undefined :: t)
                  & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "var" type_attr: "T" is_ref: true }
input_arg { name: "accum" type_attr: "T" is_ref: true }
input_arg { name: "linear" type_attr: "T" is_ref: true }
input_arg { name: "grad" type_attr: "T" }
input_arg { name: "lr" type_attr: "T" }
input_arg { name: "l1" type_attr: "T" }
input_arg { name: "l2" type_attr: "T" }
input_arg { name: "l2_shrinkage" type_attr: "T" }
input_arg { name: "lr_power" type_attr: "T" }
output_arg { name: "out" type_attr: "T" is_ref: true }
attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } }
attr { name: "use_locking" type: "bool" default_value { b: false } }
-}
-- |
applyGradientDescent :: forall v'2 v'3 t m' .
                        (MonadBuild m',
                         OneOf '[(Data.Complex.Complex Double),
                                 (Data.Complex.Complex Float), Data.Int.Int16,
                                 Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                                 Data.Word.Word16, Data.Word.Word32,
                                 Data.Word.Word64, Data.Word.Word8, Double,
                                 Float] t)
                     => Tensor Ref t -- ^ __var__
                     -> Tensor v'2 t -- ^ __alpha__
                     -> Tensor v'3 t -- ^ __delta__
                     -> m' (Tensor Ref t) -- ^ __out__
applyGradientDescent = applyGradientDescent' id
applyGradientDescent' :: forall v'2 v'3 t m' .
                         (MonadBuild m',
                          OneOf '[(Data.Complex.Complex Double),
                                  (Data.Complex.Complex Float),
                                  Data.Int.Int16, Data.Int.Int32,
                                  Data.Int.Int64, Data.Int.Int8,
                                  Data.Word.Word16, Data.Word.Word32,
                                  Data.Word.Word64, Data.Word.Word8, Double,
                                  Float] t)
                      => OpParams
                      -> Tensor Ref t -- ^ __var__
                      -> Tensor v'2 t -- ^ __alpha__
                      -> Tensor v'3 t -- ^ __delta__
                      -> m' (Tensor Ref t) -- ^ __out__
applyGradientDescent' op'options var alpha delta
  | eqLengthGuard [] = build $ do
      op'inputs <- fmap Prelude.concat $ Prelude.sequence
                       [buildInputs var, buildInputs alpha, buildInputs delta]
      buildOp [] (opDef "ApplyGradientDescent"
                  & opAttr "T" .~ tensorType (undefined :: t)
                  & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "var" type_attr: "T" is_ref: true }
input_arg { name: "alpha" type_attr: "T" }
input_arg { name: "delta" type_attr: "T" }
output_arg { name: "out" type_attr: "T" is_ref: true }
attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } }
attr { name: "use_locking" type: "bool" default_value { b: false } }
-}

-- |
applyMomentum :: forall v'3 v'4 v'5 t m' .
                 (MonadBuild m',
                  OneOf '[(Data.Complex.Complex Double),
                          (Data.Complex.Complex Float), Data.Int.Int16,
                          Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                          Data.Word.Word16, Data.Word.Word32,
                          Data.Word.Word64, Data.Word.Word8, Double, Float] t)
              => Tensor Ref t -- ^ __var__
              -> Tensor Ref t -- ^ __accum__
              -> Tensor v'3 t -- ^ __lr__
              -> Tensor v'4 t -- ^ __grad__
              -> Tensor v'5 t -- ^ __momentum__
              -> m' (Tensor Ref t) -- ^ __out__
applyMomentum = applyMomentum' id
applyMomentum' :: forall v'3 v'4 v'5 t m' .
                  (MonadBuild m',
                   OneOf '[(Data.Complex.Complex Double),
                           (Data.Complex.Complex Float), Data.Int.Int16,
                           Data.Int.Int32, Data.Int.Int64, Data.Int.Int8,
                           Data.Word.Word16, Data.Word.Word32,
                           Data.Word.Word64, Data.Word.Word8, Double,
                           Float] t)
               => OpParams
               -> Tensor Ref t -- ^ __var__
               -> Tensor Ref t -- ^ __accum__
               -> Tensor v'3 t -- ^ __lr__
               -> Tensor v'4 t -- ^ __grad__
               -> Tensor v'5 t -- ^ __momentum__
               -> m' (Tensor Ref t) -- ^ __out__
applyMomentum' op'options var accum lr grad momentum
  | eqLengthGuard [] = build $ do
      op'inputs <- fmap Prelude.concat $ Prelude.sequence
                       [buildInputs var, buildInputs accum, buildInputs lr,
                        buildInputs grad, buildInputs momentum]
      buildOp [] (opDef "ApplyMomentum"
                  & opAttr "T" .~ tensorType (undefined :: t)
                  & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "var" type_attr: "T" is_ref: true }
input_arg { name: "accum" type_attr: "T" is_ref: true }
input_arg { name: "lr" type_attr: "T" }
input_arg { name: "grad" type_attr: "T" }
input_arg { name: "momentum" type_attr: "T" }
output_arg { name: "out" type_attr: "T" is_ref: true }
attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } }
attr { name: "use_locking" type: "bool" default_value { b: false } }
attr { name: "use_nesterov" type: "bool" default_value { b: false } }
-}
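
-- Usage sketch (hand-written note, not part of the generated bindings):
-- like ApplyAdam, ApplyMomentum only exposes @use_nesterov@ as an attr:
--
-- > _ <- applyMomentum' (opAttr "use_nesterov" .~ True)
-- >                     var accum (scalar lr) grad (scalar mom)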
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor Ref t -- ^ __var__ -> Tensor Ref t -- ^ __m__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __logbase__ -> Tensor v'5 t -- ^ __sign_decay__ -> Tensor v'6 t -- ^ __beta__ -> Tensor v'7 t -- ^ __grad__ -> m' (Tensor Ref t) -- ^ __out__ applyPowerSign = applyPowerSign' id applyPowerSign' :: forall v'3 v'4 v'5 v'6 v'7 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor Ref t -- ^ __var__ -> Tensor Ref t -- ^ __m__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __logbase__ -> Tensor v'5 t -- ^ __sign_decay__ -> Tensor v'6 t -- ^ __beta__ -> Tensor v'7 t -- ^ __grad__ -> m' (Tensor Ref t) -- ^ __out__ applyPowerSign' op'options var m lr logbase sign_decay beta grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs m, buildInputs lr, buildInputs logbase, buildInputs sign_decay, buildInputs beta, buildInputs grad] buildOp [] (opDef "ApplyPowerSign" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type_attr: "T" is_ref: true } input_arg { name: "m" type_attr: "T" is_ref: true } input_arg { name: "lr" type_attr: "T" } input_arg { name: "logbase" type_attr: "T" } input_arg { name: "sign_decay" type_attr: "T" } input_arg { name: "beta" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } output_arg { name: "out" type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | applyProximalAdagrad :: forall v'3 v'4 v'5 v'6 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor Ref t -- ^ __var__ -> Tensor Ref t -- ^ __accum__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __l1__ -> Tensor v'5 t -- ^ __l2__ -> Tensor v'6 t -- ^ __grad__ -> m' (Tensor Ref t) -- ^ __out__ applyProximalAdagrad = applyProximalAdagrad' id applyProximalAdagrad' :: forall v'3 v'4 v'5 v'6 t m' . 
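{- Editor's note: per the upstream op documentation, ApplyPowerSign updates
     m   <- beta * m + (1 - beta) * grad
     var <- var - lr * exp(logbase * sign_decay * sign(grad) * sign(m)) * grad
   The generated Haddock above is empty, so this is stated here for
   orientation only.
-}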
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor Ref t -- ^ __var__ -> Tensor Ref t -- ^ __accum__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __l1__ -> Tensor v'5 t -- ^ __l2__ -> Tensor v'6 t -- ^ __grad__ -> m' (Tensor Ref t) -- ^ __out__ applyProximalAdagrad' op'options var accum lr l1 l2 grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs grad] buildOp [] (opDef "ApplyProximalAdagrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type_attr: "T" is_ref: true } input_arg { name: "accum" type_attr: "T" is_ref: true } input_arg { name: "lr" type_attr: "T" } input_arg { name: "l1" type_attr: "T" } input_arg { name: "l2" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } output_arg { name: "out" type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | applyProximalGradientDescent :: forall v'2 v'3 v'4 v'5 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor Ref t -- ^ __var__ -> Tensor v'2 t -- ^ __alpha__ -> Tensor v'3 t -- ^ __l1__ -> Tensor v'4 t -- ^ __l2__ -> Tensor v'5 t -- ^ __delta__ -> m' (Tensor Ref t) -- ^ __out__ applyProximalGradientDescent = applyProximalGradientDescent' id applyProximalGradientDescent' :: forall v'2 v'3 v'4 v'5 t m' . 
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor Ref t -- ^ __var__ -> Tensor v'2 t -- ^ __alpha__ -> Tensor v'3 t -- ^ __l1__ -> Tensor v'4 t -- ^ __l2__ -> Tensor v'5 t -- ^ __delta__ -> m' (Tensor Ref t) -- ^ __out__ applyProximalGradientDescent' op'options var alpha l1 l2 delta | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs alpha, buildInputs l1, buildInputs l2, buildInputs delta] buildOp [] (opDef "ApplyProximalGradientDescent" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type_attr: "T" is_ref: true } input_arg { name: "alpha" type_attr: "T" } input_arg { name: "l1" type_attr: "T" } input_arg { name: "l2" type_attr: "T" } input_arg { name: "delta" type_attr: "T" } output_arg { name: "out" type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | applyRMSProp :: forall v'4 v'5 v'6 v'7 v'8 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor Ref t -- ^ __var__ -> Tensor Ref t -- ^ __ms__ -> Tensor Ref t -- ^ __mom__ -> Tensor v'4 t -- ^ __lr__ -> Tensor v'5 t -- ^ __rho__ -> Tensor v'6 t -- ^ __momentum__ -> Tensor v'7 t -- ^ __epsilon__ -> Tensor v'8 t -- ^ __grad__ -> m' (Tensor Ref t) -- ^ __out__ applyRMSProp = applyRMSProp' id applyRMSProp' :: forall v'4 v'5 v'6 v'7 v'8 t m' . 
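{- Editor's note: the two Proximal* variants fold an elementwise L1/L2
   proximal step into the update; note that l1 and l2 are scalar tensor
   inputs here, not attrs. Sketch with hypothetical Float tensors:

   > prox = applyProximalGradientDescent w alpha l1 l2 delta
-}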
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor Ref t -- ^ __var__ -> Tensor Ref t -- ^ __ms__ -> Tensor Ref t -- ^ __mom__ -> Tensor v'4 t -- ^ __lr__ -> Tensor v'5 t -- ^ __rho__ -> Tensor v'6 t -- ^ __momentum__ -> Tensor v'7 t -- ^ __epsilon__ -> Tensor v'8 t -- ^ __grad__ -> m' (Tensor Ref t) -- ^ __out__ applyRMSProp' op'options var ms mom lr rho momentum epsilon grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs ms, buildInputs mom, buildInputs lr, buildInputs rho, buildInputs momentum, buildInputs epsilon, buildInputs grad] buildOp [] (opDef "ApplyRMSProp" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type_attr: "T" is_ref: true } input_arg { name: "ms" type_attr: "T" is_ref: true } input_arg { name: "mom" type_attr: "T" is_ref: true } input_arg { name: "lr" type_attr: "T" } input_arg { name: "rho" type_attr: "T" } input_arg { name: "momentum" type_attr: "T" } input_arg { name: "epsilon" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } output_arg { name: "out" type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | approximateEqual :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ approximateEqual = approximateEqual' id approximateEqual' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ approximateEqual' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "ApproximateEqual" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "tolerance" type: "float" default_value { f: 1.0e-5 } } -} -- | argMax :: forall v'1 v'2 t tidx output_type . 
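{- Editor's note: per the upstream op documentation, ApplyRMSProp maintains
     ms  <- rho * ms + (1 - rho) * grad * grad
     mom <- momentum * mom + lr * grad / sqrt(ms + epsilon)
     var <- var - mom
   approximateEqual, next, compares elementwise within the float `tolerance`
   attr (default 1.0e-5 above); override it via OpParams:

   > close = approximateEqual' (opAttr "tolerance" .~ (1.0e-3 :: Float)) x y
-}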
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx, OneOf '[Data.Int.Int32, Data.Int.Int64] output_type) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 tidx -- ^ __dimension__ -> Tensor Build output_type -- ^ __output__ argMax = argMax' id argMax' :: forall v'1 v'2 t tidx output_type . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx, OneOf '[Data.Int.Int32, Data.Int.Int64] output_type) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tidx -- ^ __dimension__ -> Tensor Build output_type -- ^ __output__ argMax' op'options input dimension | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs dimension] return (opDef "ArgMax" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & opAttr "output_type" .~ tensorType (undefined :: output_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "dimension" type_attr: "Tidx" } output_arg { name: "output" type_attr: "output_type" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "output_type" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | argMin :: forall v'1 v'2 t tidx output_type . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx, OneOf '[Data.Int.Int32, Data.Int.Int64] output_type) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 tidx -- ^ __dimension__ -> Tensor Build output_type -- ^ __output__ argMin = argMin' id argMin' :: forall v'1 v'2 t tidx output_type . 
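{- Editor's note: argMax's index dtype is the output_type attr (default
   DT_INT64); in this binding it is picked by the result type you demand.
   Sketch, assuming `logits` is a Float tensor and `dim` a scalar Int32 axis:

   > best = argMax logits dim :: Tensor Build Data.Int.Int64
   > -- or request Int32 indices instead:
   > best32 = argMax logits dim :: Tensor Build Data.Int.Int32
-}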
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx, OneOf '[Data.Int.Int32, Data.Int.Int64] output_type) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tidx -- ^ __dimension__ -> Tensor Build output_type -- ^ __output__ argMin' op'options input dimension | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs dimension] return (opDef "ArgMin" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & opAttr "output_type" .~ tensorType (undefined :: output_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "dimension" type_attr: "Tidx" } output_arg { name: "output" type_attr: "output_type" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "output_type" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | asString :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ asString = asString' id asString' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ asString' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "AsString" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type: DT_STRING } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_FLOAT type: DT_DOUBLE type: DT_BOOL } } } attr { name: "precision" type: "int" default_value { i: -1 } } attr { name: "scientific" type: "bool" default_value { b: false } } attr { name: "shortest" type: "bool" default_value { b: false } } attr { name: "width" type: "int" default_value { i: -1 } } attr { name: "fill" type: "string" default_value { s: "" } } -} -- | asin :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ asin = asin' id asin' :: forall v'1 t . 
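{- Editor's note: argMin above mirrors argMax exactly. AsString's formatting
   knobs (precision, scientific, shortest, width, fill) are all attrs;
   sketch, assuming a Float tensor `xs`:

   > s = asString' ( (opAttr "precision"  .~ (3 :: Data.Int.Int64))
   >               . (opAttr "scientific" .~ True) ) xs
-}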
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ asin' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Asin" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | asinh :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ asinh = asinh' id asinh' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ asinh' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Asinh" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | assert :: forall v'1 v'2 t m' . (MonadBuild m', TensorTypes t) => Tensor v'1 Bool -- ^ __condition__ -> TensorList (v'2) t -- ^ __data__ -> m' (ControlNode) assert = assert' id assert' :: forall v'1 v'2 t m' . (MonadBuild m', TensorTypes t) => OpParams -> Tensor v'1 Bool -- ^ __condition__ -> TensorList (v'2) t -- ^ __data__ -> m' (ControlNode) assert' op'options condition data' | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs condition, buildInputs data'] buildOp [] (opDef "Assert" & opAttr "T" .~ fromTensorTypes (Proxy :: Proxy t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "condition" type: DT_BOOL } input_arg { name: "data" type_list_attr: "T" } attr { name: "T" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "summarize" type: "int" default_value { i: 3 } } -} -- | assign :: forall v'2 t m' . (MonadBuild m', TensorType t) => Tensor Ref t -- ^ __ref__ -> Tensor v'2 t -- ^ __value__ -> m' (Tensor Ref t) -- ^ __output_ref__ assign = assign' id assign' :: forall v'2 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -- ^ __ref__ -> Tensor v'2 t -- ^ __value__ -> m' (Tensor Ref t) -- ^ __output_ref__ assign' op'options ref value | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs value] buildOp [] (opDef "Assign" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" type_attr: "T" is_ref: true } input_arg { name: "value" type_attr: "T" } output_arg { name: "output_ref" type_attr: "T" is_ref: true } attr { name: "T" type: "type" } attr { name: "validate_shape" type: "bool" default_value { b: true } } attr { name: "use_locking" type: "bool" default_value { b: true } } -} -- | assignAdd :: forall v'2 t m' . 
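{- Editor's note: assign returns the ref for chaining, while Assert yields
   only a ControlNode, so downstream ops must be sequenced after it
   explicitly (withControlDependencies from the tensorflow package's
   TensorFlow.ControlFlow). Hypothetical sketch, using the ListOf
   constructors (:/) and Nil from TensorFlow.Types:

   > ok   <- assert cond (someTensor :/ Nil)
   > ref' <- withControlDependencies ok (assign ref value)
-}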
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor Ref t -- ^ __ref__ -> Tensor v'2 t -- ^ __value__ -> m' (Tensor Ref t) -- ^ __output_ref__ assignAdd = assignAdd' id assignAdd' :: forall v'2 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor Ref t -- ^ __ref__ -> Tensor v'2 t -- ^ __value__ -> m' (Tensor Ref t) -- ^ __output_ref__ assignAdd' op'options ref value | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs value] buildOp [] (opDef "AssignAdd" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" type_attr: "T" is_ref: true } input_arg { name: "value" type_attr: "T" } output_arg { name: "output_ref" type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | assignAddVariableOp :: forall v'1 v'2 dtype m' . (MonadBuild m', TensorType dtype) => Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 dtype -- ^ __value__ -> m' (ControlNode) assignAddVariableOp = assignAddVariableOp' id assignAddVariableOp' :: forall v'1 v'2 dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 dtype -- ^ __value__ -> m' (ControlNode) assignAddVariableOp' op'options resource value | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs value] buildOp [] (opDef "AssignAddVariableOp" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } input_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" } -} -- | assignSub :: forall v'2 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor Ref t -- ^ __ref__ -> Tensor v'2 t -- ^ __value__ -> m' (Tensor Ref t) -- ^ __output_ref__ assignSub = assignSub' id assignSub' :: forall v'2 t m' . 
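{- Editor's note: assignAdd works on legacy ref tensors and hands back the
   updated ref; assignAddVariableOp is the resource-variable counterpart and
   returns only a ControlNode, so its effect is observed by reading the
   resource afterwards. Hypothetical sketch:

   > counter' <- assignAdd counter one            -- ref style
   > done     <- assignAddVariableOp handle one   -- resource style
-}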
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor Ref t -- ^ __ref__ -> Tensor v'2 t -- ^ __value__ -> m' (Tensor Ref t) -- ^ __output_ref__ assignSub' op'options ref value | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs value] buildOp [] (opDef "AssignSub" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" type_attr: "T" is_ref: true } input_arg { name: "value" type_attr: "T" } output_arg { name: "output_ref" type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | assignSubVariableOp :: forall v'1 v'2 dtype m' . (MonadBuild m', TensorType dtype) => Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 dtype -- ^ __value__ -> m' (ControlNode) assignSubVariableOp = assignSubVariableOp' id assignSubVariableOp' :: forall v'1 v'2 dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 dtype -- ^ __value__ -> m' (ControlNode) assignSubVariableOp' op'options resource value | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs value] buildOp [] (opDef "AssignSubVariableOp" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } input_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" } -} -- | assignVariableOp :: forall v'1 v'2 dtype m' . (MonadBuild m', TensorType dtype) => Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 dtype -- ^ __value__ -> m' (ControlNode) assignVariableOp = assignVariableOp' id assignVariableOp' :: forall v'1 v'2 dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 dtype -- ^ __value__ -> m' (ControlNode) assignVariableOp' op'options resource value | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs value] buildOp [] (opDef "AssignVariableOp" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } input_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" } -} -- | atan :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ atan = atan' id atan' :: forall v'1 t . 
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ atan' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Atan" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | atan2 :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __y__ -> Tensor v'2 t -- ^ __x__ -> Tensor Build t -- ^ __z__ atan2 = atan2' id atan2' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __y__ -> Tensor v'2 t -- ^ __x__ -> Tensor Build t -- ^ __z__ atan2' op'options y x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs y, buildInputs x] return (opDef "Atan2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "y" type_attr: "T" } input_arg { name: "x" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | atanh :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ atanh = atanh' id atanh' :: forall v'1 t . 
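{- Editor's note: as in C's atan2, the ordinate comes first: atan2 takes y
   then x and returns the elementwise angle of the point (x, y). Unlike atan,
   it admits no complex types, per the allowed_values list above.
-}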
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ atanh' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Atanh" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | audioSpectrogram :: Data.Int.Int64 -- ^ __stride__ -> Data.Int.Int64 -- ^ __window_size__ -> Tensor v'1 Float -- ^ __input__ -> Tensor Build Float -- ^ __spectrogram__ audioSpectrogram = audioSpectrogram' id audioSpectrogram' :: OpParams -> Data.Int.Int64 -- ^ __stride__ -> Data.Int.Int64 -- ^ __window_size__ -> Tensor v'1 Float -- ^ __input__ -> Tensor Build Float -- ^ __spectrogram__ audioSpectrogram' op'options stride window_size input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "AudioSpectrogram" & opAttr "stride" .~ stride & opAttr "window_size" .~ window_size & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_FLOAT } output_arg { name: "spectrogram" type: DT_FLOAT } attr { name: "window_size" type: "int" } attr { name: "stride" type: "int" } attr { name: "magnitude_squared" type: "bool" default_value { b: false } } -} -- | audioSummary :: Float -- ^ __sample_rate__ -> Tensor v'1 Data.ByteString.ByteString -- ^ __tag__ -> Tensor v'2 Float -- ^ __tensor__ -> Tensor Build Data.ByteString.ByteString -- ^ __summary__ audioSummary = audioSummary' id audioSummary' :: OpParams -> Float -- ^ __sample_rate__ -> Tensor v'1 Data.ByteString.ByteString -- ^ __tag__ -> Tensor v'2 Float -- ^ __tensor__ -> Tensor Build Data.ByteString.ByteString -- ^ __summary__ audioSummary' op'options sample_rate tag tensor | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tag, buildInputs tensor] return (opDef "AudioSummary" & opAttr "sample_rate" .~ sample_rate & op'options & opInputs .~ op'inputs) {- input_arg { name: "tag" type: DT_STRING } input_arg { name: "tensor" type: DT_FLOAT } output_arg { name: "summary" type: DT_STRING } attr { name: "sample_rate" type: "float" } attr { name: "max_outputs" type: "int" default_value { i: 3 } has_minimum: true minimum: 1 } -} -- | audioSummaryV2 :: Tensor v'1 Data.ByteString.ByteString -- ^ __tag__ -> Tensor v'2 Float -- ^ __tensor__ -> Tensor v'3 Float -- ^ __sample_rate__ -> Tensor Build Data.ByteString.ByteString -- ^ __summary__ audioSummaryV2 = audioSummaryV2' id audioSummaryV2' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __tag__ -> Tensor v'2 Float -- ^ __tensor__ -> Tensor v'3 Float -- ^ __sample_rate__ -> Tensor Build Data.ByteString.ByteString -- ^ __summary__ audioSummaryV2' op'options tag tensor sample_rate | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tag, buildInputs tensor, buildInputs sample_rate] return (opDef "AudioSummaryV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "tag" type: DT_STRING } input_arg { name: "tensor" type: DT_FLOAT } input_arg { name: "sample_rate" type: DT_FLOAT } output_arg { name: "summary" type: DT_STRING } attr { name: "max_outputs" type: "int" 
default_value { i: 3 } has_minimum: true minimum: 1 } -} -- | avgPool :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __value__ -> Tensor Build t -- ^ __output__ avgPool = avgPool' id avgPool' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __value__ -> Tensor Build t -- ^ __output__ avgPool' op'options value | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value] return (opDef "AvgPool" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "value" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "ksize" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "strides" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | avgPool3D :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ avgPool3D = avgPool3D' id avgPool3D' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ avgPool3D' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "AvgPool3D" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "ksize" type: "list(int)" has_minimum: true minimum: 5 } attr { name: "strides" type: "list(int)" has_minimum: true minimum: 5 } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NDHWC" } allowed_values { list { s: "NDHWC" s: "NCDHW" } } } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | avgPool3DGrad :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 Data.Int.Int32 -- ^ __orig_input_shape__ -> Tensor v'2 t -- ^ __grad__ -> Tensor Build t -- ^ __output__ avgPool3DGrad = avgPool3DGrad' id avgPool3DGrad' :: forall v'1 v'2 t . 
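{- Editor's note: audioSummaryV2, just above, supersedes audioSummary by
   taking sample_rate as a tensor input rather than an attr. For AvgPool,
   the required ksize/strides/padding attrs have no positional arguments, so
   they must be supplied via OpParams; a sketch for 2x2 NHWC pooling over a
   Float tensor `img`:

   > pooled = avgPool' ( (opAttr "ksize"   .~ ([1,2,2,1] :: [Data.Int.Int64]))
   >                   . (opAttr "strides" .~ ([1,2,2,1] :: [Data.Int.Int64]))
   >                   . (opAttr "padding" .~ ("VALID" :: Data.ByteString.ByteString)) )
   >                   img
-}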
(OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __orig_input_shape__ -> Tensor v'2 t -- ^ __grad__ -> Tensor Build t -- ^ __output__ avgPool3DGrad' op'options orig_input_shape grad | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs orig_input_shape, buildInputs grad] return (opDef "AvgPool3DGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "orig_input_shape" type: DT_INT32 } input_arg { name: "grad" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "ksize" type: "list(int)" has_minimum: true minimum: 5 } attr { name: "strides" type: "list(int)" has_minimum: true minimum: 5 } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NDHWC" } allowed_values { list { s: "NDHWC" s: "NCDHW" } } } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | avgPoolGrad :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 Data.Int.Int32 -- ^ __orig_input_shape__ -> Tensor v'2 t -- ^ __grad__ -> Tensor Build t -- ^ __output__ avgPoolGrad = avgPoolGrad' id avgPoolGrad' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __orig_input_shape__ -> Tensor v'2 t -- ^ __grad__ -> Tensor Build t -- ^ __output__ avgPoolGrad' op'options orig_input_shape grad | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs orig_input_shape, buildInputs grad] return (opDef "AvgPoolGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "orig_input_shape" type: DT_INT32 } input_arg { name: "grad" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "ksize" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "strides" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | barrier :: forall m' . (MonadBuild m') => [DataType] -- ^ __component_types__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__ barrier = barrier' id barrier' :: forall m' . (MonadBuild m') => OpParams -> [DataType] -- ^ __component_types__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__ barrier' op'options component_types | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "Barrier" & opAttr "component_types" .~ component_types & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" type: DT_STRING is_ref: true } attr { name: "component_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "shapes" type: "list(shape)" default_value { list { } } has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | barrierClose :: forall m' . 
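{- Editor's note: Barrier is stateful; capacity, container and shared_name
   are attrs, so cross-session sharing is requested through OpParams.
   Hypothetical sketch:

   > h <- barrier' (opAttr "shared_name" .~ ("b0" :: Data.ByteString.ByteString))
   >               [DT_FLOAT]
-}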
(MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> m' (ControlNode) barrierClose = barrierClose' id barrierClose' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> m' (ControlNode) barrierClose' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "BarrierClose" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } attr { name: "cancel_pending_enqueues" type: "bool" default_value { b: false } } -} -- | barrierIncompleteSize :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> m' (Tensor Value Data.Int.Int32) -- ^ __size__ barrierIncompleteSize = barrierIncompleteSize' id barrierIncompleteSize' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> m' (Tensor Value Data.Int.Int32) -- ^ __size__ barrierIncompleteSize' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "BarrierIncompleteSize" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } output_arg { name: "size" type: DT_INT32 } -} -- | barrierInsertMany :: forall v'2 v'3 t m' . (MonadBuild m', TensorType t) => Data.Int.Int64 -- ^ __component_index__ -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __keys__ -> Tensor v'3 t -- ^ __values__ -> m' (ControlNode) barrierInsertMany = barrierInsertMany' id barrierInsertMany' :: forall v'2 v'3 t m' . (MonadBuild m', TensorType t) => OpParams -> Data.Int.Int64 -- ^ __component_index__ -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __keys__ -> Tensor v'3 t -- ^ __values__ -> m' (ControlNode) barrierInsertMany' op'options component_index handle keys values | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs keys, buildInputs values] buildOp [] (opDef "BarrierInsertMany" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "component_index" .~ component_index & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "keys" type: DT_STRING } input_arg { name: "values" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "component_index" type: "int" } -} -- | barrierReadySize :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> m' (Tensor Value Data.Int.Int32) -- ^ __size__ barrierReadySize = barrierReadySize' id barrierReadySize' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> m' (Tensor Value Data.Int.Int32) -- ^ __size__ barrierReadySize' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "BarrierReadySize" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } output_arg { name: "size" type: DT_INT32 } -} -- | barrierTakeMany :: forall v'2 component_types m' . 
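{- Editor's note: barrierInsertMany writes one component column for several
   rows at once: keys name the rows and the Int64 component_index picks the
   component that values fills. A row becomes ready once every component has
   been inserted, at which point barrierReadySize counts it.
-}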
(MonadBuild m', TensorTypes component_types) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __num_elements__ -> m' ((Tensor Value Data.Int.Int64, Tensor Value Data.ByteString.ByteString, TensorList (Value) component_types)) -- ^ (__indices__, __keys__, __values__) -- -- * __indices__ -- -- * __keys__ -- -- * __values__ barrierTakeMany = barrierTakeMany' id barrierTakeMany' :: forall v'2 component_types m' . (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __num_elements__ -> m' ((Tensor Value Data.Int.Int64, Tensor Value Data.ByteString.ByteString, TensorList (Value) component_types)) -- ^ (__indices__, __keys__, __values__) -- -- * __indices__ -- -- * __keys__ -- -- * __values__ barrierTakeMany' op'options handle num_elements | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs num_elements] buildOp [] (opDef "BarrierTakeMany" & opAttr "component_types" .~ fromTensorTypes (Proxy :: Proxy component_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "num_elements" type: DT_INT32 } output_arg { name: "indices" type: DT_INT64 } output_arg { name: "keys" type: DT_STRING } output_arg { name: "values" type_list_attr: "component_types" } attr { name: "component_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "allow_small_batch" type: "bool" default_value { b: false } } attr { name: "wait_for_incomplete" type: "bool" default_value { b: false } } attr { name: "timeout_ms" type: "int" default_value { i: -1 } } -} -- | batch :: forall v'1 t . (TensorTypes t) => Data.Int.Int64 -- ^ __batch_timeout_micros__ -> Data.Int.Int64 -- ^ __grad_timeout_micros__ -> Data.Int.Int64 -- ^ __max_batch_size__ -> Data.Int.Int64 -- ^ __num_batch_threads__ -> TensorList (v'1) t -- ^ __in_tensors__ -> (TensorList (Build) t, Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int64) -- ^ (__batched_tensors__, __batch_index__, __id__) -- -- * __batched_tensors__ -- -- * __batch_index__ -- -- * __id__ batch = batch' id batch' :: forall v'1 t . 
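{- Editor's note: barrierTakeMany blocks until num_elements ready rows exist
   and returns (indices, keys, values), where values is a heterogeneous
   TensorList typed by component_types. Its timeout_ms and allow_small_batch
   attrs (defaults above) are reachable through OpParams:

   > (idx, ks, vals) <- barrierTakeMany'
   >                      (opAttr "timeout_ms" .~ (5000 :: Data.Int.Int64)) h n
-}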
(TensorTypes t) => OpParams -> Data.Int.Int64 -- ^ __batch_timeout_micros__ -> Data.Int.Int64 -- ^ __grad_timeout_micros__ -> Data.Int.Int64 -- ^ __max_batch_size__ -> Data.Int.Int64 -- ^ __num_batch_threads__ -> TensorList (v'1) t -- ^ __in_tensors__ -> (TensorList (Build) t, Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int64) -- ^ (__batched_tensors__, __batch_index__, __id__) -- -- * __batched_tensors__ -- -- * __batch_index__ -- -- * __id__ batch' op'options batch_timeout_micros grad_timeout_micros max_batch_size num_batch_threads in_tensors | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs in_tensors] return (opDef "Batch" & opAttr "T" .~ fromTensorTypes (Proxy :: Proxy t) & opAttr "batch_timeout_micros" .~ batch_timeout_micros & opAttr "grad_timeout_micros" .~ grad_timeout_micros & opAttr "max_batch_size" .~ max_batch_size & opAttr "num_batch_threads" .~ num_batch_threads & op'options & opInputs .~ op'inputs) {- input_arg { name: "in_tensors" type_list_attr: "T" } output_arg { name: "batched_tensors" type_list_attr: "T" } output_arg { name: "batch_index" type: DT_INT64 } output_arg { name: "id" type: DT_INT64 } attr { name: "num_batch_threads" type: "int" } attr { name: "max_batch_size" type: "int" } attr { name: "max_enqueued_batches" type: "int" default_value { i: 10 } } attr { name: "batch_timeout_micros" type: "int" } attr { name: "allowed_batch_sizes" type: "list(int)" default_value { list { } } } attr { name: "grad_timeout_micros" type: "int" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } attr { name: "batching_queue" type: "string" default_value { s: "" } } attr { name: "T" type: "list(type)" has_minimum: true minimum: 1 } -} -- | batchCholesky :: forall v'1 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ batchCholesky = batchCholesky' id batchCholesky' :: forall v'1 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ batchCholesky' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchCholesky" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } } } -} -- | batchCholeskyGrad :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __l__ -> Tensor v'2 t -- ^ __grad__ -> Tensor Build t -- ^ __output__ batchCholeskyGrad = batchCholeskyGrad' id batchCholeskyGrad' :: forall v'1 v'2 t . 
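{- Editor's note: Batch's positional arguments follow the generator's
   alphabetical attr order (batch_timeout_micros, grad_timeout_micros,
   max_batch_size, num_batch_threads), not the op-def order in the trailing
   comment; since all four are Int64, swapping them still type-checks, so
   label them when calling:

   > (outs, idx, bid) = batch 100000 -- batch_timeout_micros
   >                          0      -- grad_timeout_micros
   >                          32     -- max_batch_size
   >                          4      -- num_batch_threads
   >                          ins
-}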
(OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __l__ -> Tensor v'2 t -- ^ __grad__ -> Tensor Build t -- ^ __output__ batchCholeskyGrad' op'options l grad | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs l, buildInputs grad] return (opDef "BatchCholeskyGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "l" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | batchDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __batch_size__ -> Tensor Build Variant -- ^ __handle__ batchDataset = batchDataset' id batchDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __batch_size__ -> Tensor Build Variant -- ^ __handle__ batchDataset' op'options output_types input_dataset batch_size | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs batch_size] return (opDef "BatchDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "batch_size" type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | batchDatasetV2 :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __batch_size__ -> Tensor v'3 Bool -- ^ __drop_remainder__ -> Tensor Build Variant -- ^ __handle__ batchDatasetV2 = batchDatasetV2' id batchDatasetV2' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __batch_size__ -> Tensor v'3 Bool -- ^ __drop_remainder__ -> Tensor Build Variant -- ^ __handle__ batchDatasetV2' op'options output_types input_dataset batch_size drop_remainder | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs batch_size, buildInputs drop_remainder] return (opDef "BatchDatasetV2" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "batch_size" type: DT_INT64 } input_arg { name: "drop_remainder" type: DT_BOOL } output_arg { name: "handle" type: DT_VARIANT } attr { name: "parallel_copy" type: "bool" default_value { b: false } } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | batchFFT :: Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ batchFFT = batchFFT' id batchFFT' :: OpParams -> Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ batchFFT' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchFFT" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_COMPLEX64 } output_arg { name: "output" type: 
DT_COMPLEX64 } -} -- | batchFFT2D :: Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ batchFFT2D = batchFFT2D' id batchFFT2D' :: OpParams -> Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ batchFFT2D' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchFFT2D" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_COMPLEX64 } output_arg { name: "output" type: DT_COMPLEX64 } -} -- | batchFFT3D :: Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ batchFFT3D = batchFFT3D' id batchFFT3D' :: OpParams -> Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ batchFFT3D' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchFFT3D" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_COMPLEX64 } output_arg { name: "output" type: DT_COMPLEX64 } -} -- | batchIFFT :: Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ batchIFFT = batchIFFT' id batchIFFT' :: OpParams -> Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ batchIFFT' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchIFFT" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_COMPLEX64 } output_arg { name: "output" type: DT_COMPLEX64 } -} -- | batchIFFT2D :: Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ batchIFFT2D = batchIFFT2D' id batchIFFT2D' :: OpParams -> Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ batchIFFT2D' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchIFFT2D" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_COMPLEX64 } output_arg { name: "output" type: DT_COMPLEX64 } -} -- | batchIFFT3D :: Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ batchIFFT3D = batchIFFT3D' id batchIFFT3D' :: OpParams -> Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ batchIFFT3D' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchIFFT3D" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_COMPLEX64 } output_arg { name: "output" type: DT_COMPLEX64 } -} -- | batchMatMul :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __output__ batchMatMul = batchMatMul' id batchMatMul' :: forall v'1 v'2 t . 
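{- Editor's note: the BatchFFT/BatchIFFT family above consists of deprecated
   aliases for FFT/IFFT (and their 2D/3D forms), which already transform the
   innermost dimensions of a batched complex64 tensor; prefer the non-Batch
   names in new code.
-}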
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __output__ batchMatMul' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "BatchMatMul" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } attr { name: "adj_x" type: "bool" default_value { b: false } } attr { name: "adj_y" type: "bool" default_value { b: false } } -} -- | batchMatMulV2 :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __output__ batchMatMulV2 = batchMatMulV2' id batchMatMulV2' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __output__ batchMatMulV2' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "BatchMatMulV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } attr { name: "adj_x" type: "bool" default_value { b: false } } attr { name: "adj_y" type: "bool" default_value { b: false } } -} -- | batchMatrixBandPart :: forall v'1 v'2 v'3 t . (TensorType t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int64 -- ^ __num_lower__ -> Tensor v'3 Data.Int.Int64 -- ^ __num_upper__ -> Tensor Build t -- ^ __band__ batchMatrixBandPart = batchMatrixBandPart' id batchMatrixBandPart' :: forall v'1 v'2 v'3 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int64 -- ^ __num_lower__ -> Tensor v'3 Data.Int.Int64 -- ^ __num_upper__ -> Tensor Build t -- ^ __band__ batchMatrixBandPart' op'options input num_lower num_upper | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs num_lower, buildInputs num_upper] return (opDef "BatchMatrixBandPart" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "num_lower" type: DT_INT64 } input_arg { name: "num_upper" type: DT_INT64 } output_arg { name: "band" type_attr: "T" } attr { name: "T" type: "type" } -} -- | batchMatrixDeterminant :: forall v'1 t . 
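{- Editor's note: BatchMatMulV2 extends BatchMatMul with broadcasting of the
   leading batch dimensions; the adjoint flags remain attrs. Sketch, assuming
   rank-3 Float tensors `a` and `b`:

   > c = batchMatMulV2' (opAttr "adj_y" .~ True) a b
-}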
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ batchMatrixDeterminant = batchMatrixDeterminant' id batchMatrixDeterminant' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ batchMatrixDeterminant' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchMatrixDeterminant" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | batchMatrixDiag :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __diagonal__ -> Tensor Build t -- ^ __output__ batchMatrixDiag = batchMatrixDiag' id batchMatrixDiag' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __diagonal__ -> Tensor Build t -- ^ __output__ batchMatrixDiag' op'options diagonal | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs diagonal] return (opDef "BatchMatrixDiag" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "diagonal" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } -} -- | batchMatrixDiagPart :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __diagonal__ batchMatrixDiagPart = batchMatrixDiagPart' id batchMatrixDiagPart' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __diagonal__ batchMatrixDiagPart' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchMatrixDiagPart" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "diagonal" type_attr: "T" } attr { name: "T" type: "type" } -} -- | batchMatrixInverse :: forall v'1 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ batchMatrixInverse = batchMatrixInverse' id batchMatrixInverse' :: forall v'1 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ batchMatrixInverse' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchMatrixInverse" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "adjoint" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } } } -} -- | batchMatrixSetDiag :: forall v'1 v'2 t . (TensorType t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __diagonal__ -> Tensor Build t -- ^ __output__ batchMatrixSetDiag = batchMatrixSetDiag' id batchMatrixSetDiag' :: forall v'1 v'2 t . 
(TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __diagonal__ -> Tensor Build t -- ^ __output__ batchMatrixSetDiag' op'options input diagonal | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs diagonal] return (opDef "BatchMatrixSetDiag" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "diagonal" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } -} -- | batchMatrixSolve :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __matrix__ -> Tensor v'2 t -- ^ __rhs__ -> Tensor Build t -- ^ __output__ batchMatrixSolve = batchMatrixSolve' id batchMatrixSolve' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __matrix__ -> Tensor v'2 t -- ^ __rhs__ -> Tensor Build t -- ^ __output__ batchMatrixSolve' op'options matrix rhs | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs matrix, buildInputs rhs] return (opDef "BatchMatrixSolve" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "matrix" type_attr: "T" } input_arg { name: "rhs" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "adjoint" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } } } -} -- | batchMatrixSolveLs :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __matrix__ -> Tensor v'2 t -- ^ __rhs__ -> Tensor v'3 Double -- ^ __l2_regularizer__ -> Tensor Build t -- ^ __output__ batchMatrixSolveLs = batchMatrixSolveLs' id batchMatrixSolveLs' :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __matrix__ -> Tensor v'2 t -- ^ __rhs__ -> Tensor v'3 Double -- ^ __l2_regularizer__ -> Tensor Build t -- ^ __output__ batchMatrixSolveLs' op'options matrix rhs l2_regularizer | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs matrix, buildInputs rhs, buildInputs l2_regularizer] return (opDef "BatchMatrixSolveLs" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "matrix" type_attr: "T" } input_arg { name: "rhs" type_attr: "T" } input_arg { name: "l2_regularizer" type: DT_DOUBLE } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } } } attr { name: "fast" type: "bool" default_value { b: true } } -} -- | batchMatrixTriangularSolve :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __matrix__ -> Tensor v'2 t -- ^ __rhs__ -> Tensor Build t -- ^ __output__ batchMatrixTriangularSolve = batchMatrixTriangularSolve' id batchMatrixTriangularSolve' :: forall v'1 v'2 t . 
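-- E.g. solving an upper-triangular (rather than the default lower-triangular)
-- system by overriding the "lower" attr (sketch):
--
-- @
-- xs = batchMatrixTriangularSolve' (opAttr "lower" .~ False) matrix rhs
-- @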
(OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __matrix__ -> Tensor v'2 t -- ^ __rhs__ -> Tensor Build t -- ^ __output__ batchMatrixTriangularSolve' op'options matrix rhs | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs matrix, buildInputs rhs] return (opDef "BatchMatrixTriangularSolve" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "matrix" type_attr: "T" } input_arg { name: "rhs" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "lower" type: "bool" default_value { b: true } } attr { name: "adjoint" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } } } -} -- | batchNormWithGlobalNormalization :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Bool -- ^ __scale_after_normalization__ -> Float -- ^ __variance_epsilon__ -> Tensor v'1 t -- ^ __t__ -> Tensor v'2 t -- ^ __m__ -> Tensor v'3 t -- ^ __v__ -> Tensor v'4 t -- ^ __beta__ -> Tensor v'5 t -- ^ __gamma__ -> Tensor Build t -- ^ __result__ batchNormWithGlobalNormalization = batchNormWithGlobalNormalization' id batchNormWithGlobalNormalization' :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Bool -- ^ __scale_after_normalization__ -> Float -- ^ __variance_epsilon__ -> Tensor v'1 t -- ^ __t__ -> Tensor v'2 t -- ^ __m__ -> Tensor v'3 t -- ^ __v__ -> Tensor v'4 t -- ^ __beta__ -> Tensor v'5 t -- ^ __gamma__ -> Tensor Build t -- ^ __result__ batchNormWithGlobalNormalization' op'options scale_after_normalization variance_epsilon t m v beta gamma | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs t, buildInputs m, buildInputs v, buildInputs beta, buildInputs gamma] return (opDef "BatchNormWithGlobalNormalization" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "scale_after_normalization" .~ scale_after_normalization & opAttr "variance_epsilon" .~ variance_epsilon & op'options & opInputs .~ op'inputs) {- input_arg { name: "t" type_attr: "T" } input_arg { name: "m" type_attr: "T" } input_arg { name: "v" type_attr: "T" } input_arg { name: "beta" type_attr: "T" } input_arg { name: "gamma" type_attr: "T" } output_arg { name: "result" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "variance_epsilon" type: "float" } attr { name: "scale_after_normalization" type: "bool" } -} -- | batchNormWithGlobalNormalizationGrad :: forall v'1 v'2 v'3 v'4 v'5 t . 
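-- In 'batchNormWithGlobalNormalization' above, the two leading arguments fill
-- the required "scale_after_normalization" and "variance_epsilon" attrs, e.g.
-- (sketch):
--
-- @
-- normalized = batchNormWithGlobalNormalization True 0.001 t m v beta gamma
-- @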
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Bool -- ^ __scale_after_normalization__ -> Float -- ^ __variance_epsilon__ -> Tensor v'1 t -- ^ __t__ -> Tensor v'2 t -- ^ __m__ -> Tensor v'3 t -- ^ __v__ -> Tensor v'4 t -- ^ __gamma__ -> Tensor v'5 t -- ^ __backprop__ -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) -- ^ (__dx__, __dm__, __dv__, __db__, __dg__) -- -- * __dx__ -- -- * __dm__ -- -- * __dv__ -- -- * __db__ -- -- * __dg__ batchNormWithGlobalNormalizationGrad = batchNormWithGlobalNormalizationGrad' id batchNormWithGlobalNormalizationGrad' :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Bool -- ^ __scale_after_normalization__ -> Float -- ^ __variance_epsilon__ -> Tensor v'1 t -- ^ __t__ -> Tensor v'2 t -- ^ __m__ -> Tensor v'3 t -- ^ __v__ -> Tensor v'4 t -- ^ __gamma__ -> Tensor v'5 t -- ^ __backprop__ -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) -- ^ (__dx__, __dm__, __dv__, __db__, __dg__) -- -- * __dx__ -- -- * __dm__ -- -- * __dv__ -- -- * __db__ -- -- * __dg__ batchNormWithGlobalNormalizationGrad' op'options scale_after_normalization variance_epsilon t m v gamma backprop | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs t, buildInputs m, buildInputs v, buildInputs gamma, buildInputs backprop] return (opDef "BatchNormWithGlobalNormalizationGrad" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "scale_after_normalization" .~ scale_after_normalization & opAttr "variance_epsilon" .~ variance_epsilon & op'options & opInputs .~ op'inputs) {- input_arg { name: "t" type_attr: "T" } input_arg { name: "m" type_attr: "T" } input_arg { name: "v" type_attr: "T" } input_arg { name: "gamma" type_attr: "T" } input_arg { name: "backprop" type_attr: "T" } output_arg { name: "dx" type_attr: "T" } output_arg { name: "dm" type_attr: "T" } output_arg { name: "dv" type_attr: "T" } output_arg { name: "db" type_attr: "T" } output_arg { name: "dg" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "variance_epsilon" type: "float" } attr { name: "scale_after_normalization" type: "bool" } -} -- | batchSelfAdjointEig :: forall v'1 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ batchSelfAdjointEig = batchSelfAdjointEig' id batchSelfAdjointEig' :: forall v'1 t . 
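-- The gradient op 'batchNormWithGlobalNormalizationGrad' above returns all
-- five backprops as a tuple of pure tensors (sketch):
--
-- @
-- (dx, dm, dv, db, dg) =
--   batchNormWithGlobalNormalizationGrad True 0.001 t m v gamma backprop
-- @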
(OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ batchSelfAdjointEig' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchSelfAdjointEig" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } } } -} -- | batchSelfAdjointEigV2 :: forall v'1 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __input__ -> (Tensor Build t, Tensor Build t) -- ^ (__e__, __v__) -- -- * __e__ -- -- * __v__ batchSelfAdjointEigV2 = batchSelfAdjointEigV2' id batchSelfAdjointEigV2' :: forall v'1 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> (Tensor Build t, Tensor Build t) -- ^ (__e__, __v__) -- -- * __e__ -- -- * __v__ batchSelfAdjointEigV2' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchSelfAdjointEigV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "e" type_attr: "T" } output_arg { name: "v" type_attr: "T" } attr { name: "compute_v" type: "bool" default_value { b: true } } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } } } -} -- | batchSvd :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Double, Float] t) => Tensor v'1 t -- ^ __input__ -> (Tensor Build t, Tensor Build t, Tensor Build t) -- ^ (__s__, __u__, __v__) -- -- * __s__ -- -- * __u__ -- -- * __v__ batchSvd = batchSvd' id batchSvd' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> (Tensor Build t, Tensor Build t, Tensor Build t) -- ^ (__s__, __u__, __v__) -- -- * __s__ -- -- * __u__ -- -- * __v__ batchSvd' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "BatchSvd" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "s" type_attr: "T" } output_arg { name: "u" type_attr: "T" } output_arg { name: "v" type_attr: "T" } attr { name: "compute_uv" type: "bool" default_value { b: true } } attr { name: "full_matrices" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | batchToSpace :: forall v'1 v'2 t tidx . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Data.Int.Int64 -- ^ __block_size__ -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tidx -- ^ __crops__ -> Tensor Build t -- ^ __output__ batchToSpace = batchToSpace' id batchToSpace' :: forall v'1 v'2 t tidx . 
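-- A sketch for 'batchSvd'' above; "compute_uv" and "full_matrices" are attrs
-- with defaults, settable through the primed variant:
--
-- @
-- (s, u, v) = batchSvd' (opAttr "full_matrices" .~ True) input
-- @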
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Data.Int.Int64 -- ^ __block_size__ -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tidx -- ^ __crops__ -> Tensor Build t -- ^ __output__ batchToSpace' op'options block_size input crops | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs crops] return (opDef "BatchToSpace" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & opAttr "block_size" .~ block_size & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "crops" type_attr: "Tidx" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "block_size" type: "int" has_minimum: true minimum: 2 } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | batchToSpaceND :: forall v'1 v'2 v'3 t tblock_shape tcrops . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tblock_shape, OneOf '[Data.Int.Int32, Data.Int.Int64] tcrops) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 tblock_shape -- ^ __block_shape__ -> Tensor v'3 tcrops -- ^ __crops__ -> Tensor Build t -- ^ __output__ batchToSpaceND = batchToSpaceND' id batchToSpaceND' :: forall v'1 v'2 v'3 t tblock_shape tcrops . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tblock_shape, OneOf '[Data.Int.Int32, Data.Int.Int64] tcrops) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tblock_shape -- ^ __block_shape__ -> Tensor v'3 tcrops -- ^ __crops__ -> Tensor Build t -- ^ __output__ batchToSpaceND' op'options input block_shape crops | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs block_shape, buildInputs crops] return (opDef "BatchToSpaceND" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tblock_shape" .~ tensorType (undefined :: tblock_shape) & opAttr "Tcrops" .~ tensorType (undefined :: tcrops) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "block_shape" type_attr: "Tblock_shape" } input_arg { name: "crops" type_attr: "Tcrops" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tblock_shape" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "Tcrops" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | besselI0e :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ besselI0e = besselI0e' id besselI0e' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ besselI0e' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "BesselI0e" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | besselI1e :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ besselI1e = besselI1e' id besselI1e' :: forall v'1 t . 
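-- A sketch for 'batchToSpaceND' above with a 2x2 block and no cropping
-- (assumes 'TF.constant' from the hand-written TensorFlow.Ops API):
--
-- @
-- out = batchToSpaceND input
--         (TF.constant (TF.Shape [2]) [2, 2 :: Data.Int.Int32])
--         (TF.constant (TF.Shape [2, 2]) [0, 0, 0, 0 :: Data.Int.Int32])
-- @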
(OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ besselI1e' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "BesselI1e" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | betainc :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __b__ -> Tensor v'3 t -- ^ __x__ -> Tensor Build t -- ^ __z__ betainc = betainc' id betainc' :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __b__ -> Tensor v'3 t -- ^ __x__ -> Tensor Build t -- ^ __z__ betainc' op'options a b x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a, buildInputs b, buildInputs x] return (opDef "Betainc" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a" type_attr: "T" } input_arg { name: "b" type_attr: "T" } input_arg { name: "x" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | biasAdd :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __value__ -> Tensor v'2 t -- ^ __bias__ -> Tensor Build t -- ^ __output__ biasAdd = biasAdd' id biasAdd' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __value__ -> Tensor v'2 t -- ^ __bias__ -> Tensor Build t -- ^ __output__ biasAdd' op'options value bias | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value, buildInputs bias] return (opDef "BiasAdd" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "value" type_attr: "T" } input_arg { name: "bias" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" } } } -} -- | biasAddGrad :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __out_backprop__ -> Tensor Build t -- ^ __output__ biasAddGrad = biasAddGrad' id biasAddGrad' :: forall v'1 t . 
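-- 'biasAdd' above broadcasts a 1-D bias over the channel dimension; the
-- layout is the "data_format" attr, so a sketch with the primed variant
-- (treating the string attr as a ByteString is an assumption based on this
-- module's imports):
--
-- @
-- out = biasAdd' (opAttr "data_format" .~ ("NCHW" :: ByteString)) value bias
-- @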
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __out_backprop__ -> Tensor Build t -- ^ __output__ biasAddGrad' op'options out_backprop | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs out_backprop] return (opDef "BiasAddGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "out_backprop" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" } } } -} -- | biasAddV1 :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __value__ -> Tensor v'2 t -- ^ __bias__ -> Tensor Build t -- ^ __output__ biasAddV1 = biasAddV1' id biasAddV1' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __value__ -> Tensor v'2 t -- ^ __bias__ -> Tensor Build t -- ^ __output__ biasAddV1' op'options value bias | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value, buildInputs bias] return (opDef "BiasAddV1" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "value" type_attr: "T" } input_arg { name: "bias" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | A Reader that outputs rows from a BigQuery table as tensorflow Examples. bigQueryReader :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __timestamp_millis__: Table snapshot timestamp in millis since epoch. Relative -- (negative or zero) snapshot times are not allowed. For more details, see -- 'Table Decorators' in BigQuery docs. -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader. bigQueryReader = bigQueryReader' id bigQueryReader' :: forall m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __timestamp_millis__: Table snapshot timestamp in millis since epoch. Relative -- (negative or zero) snapshot times are not allowed. For more details, see -- 'Table Decorators' in BigQuery docs. -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__: The handle to reference the Reader. 
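-- Note: per the op def below, "project_id", "dataset_id", "table_id" and
-- "columns" have no default values, so in practice they must be supplied via
-- 'OpParams'; a sketch (identifiers hypothetical, and the ByteString /
-- [ByteString] attr encodings are assumptions):
--
-- @
-- reader <- bigQueryReader'
--   ( (opAttr "project_id" .~ ("my-project" :: ByteString))
--   . (opAttr "dataset_id" .~ ("my_dataset" :: ByteString))
--   . (opAttr "table_id"   .~ ("my_table"   :: ByteString))
--   . (opAttr "columns"    .~ (["col1", "col2"] :: [ByteString]))
--   )
--   1548000000000
-- @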
bigQueryReader' op'options timestamp_millis | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "BigQueryReader" & opAttr "timestamp_millis" .~ timestamp_millis & op'options & opInputs .~ op'inputs) {- output_arg { name: "reader_handle" description: "The handle to reference the Reader." type: DT_STRING is_ref: true } attr { name: "container" type: "string" default_value { s: "" } description: "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used." } attr { name: "shared_name" type: "string" default_value { s: "" } description: "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead." } attr { name: "project_id" type: "string" description: "GCP project ID." } attr { name: "dataset_id" type: "string" description: "BigQuery Dataset ID." } attr { name: "table_id" type: "string" description: "Table to read." } attr { name: "columns" type: "list(string)" description: "List of columns to read. Leave empty to read all columns." } attr { name: "timestamp_millis" type: "int" description: "Table snapshot timestamp in millis since epoch. Relative\n(negative or zero) snapshot times are not allowed. For more details, see\n\'Table Decorators\' in BigQuery docs." } attr { name: "test_end_point" type: "string" default_value { s: "" } description: "Do not use. For testing purposes only." } -} -- | bincount :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => Tensor v'1 Data.Int.Int32 -- ^ __arr__ -> Tensor v'2 Data.Int.Int32 -- ^ __size__ -> Tensor v'3 t -- ^ __weights__ -> Tensor Build t -- ^ __bins__ bincount = bincount' id bincount' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __arr__ -> Tensor v'2 Data.Int.Int32 -- ^ __size__ -> Tensor v'3 t -- ^ __weights__ -> Tensor Build t -- ^ __bins__ bincount' op'options arr size weights | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs arr, buildInputs size, buildInputs weights] return (opDef "Bincount" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "arr" type: DT_INT32 } input_arg { name: "size" type: DT_INT32 } input_arg { name: "weights" type_attr: "T" } output_arg { name: "bins" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | bitcast :: forall v'1 t type' . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] type') => Tensor v'1 t -- ^ __input__ -> Tensor Build type' -- ^ __output__ bitcast = bitcast' id bitcast' :: forall v'1 t type' . 
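-- The output element type of 'bitcast' is chosen by the "type" attr, which is
-- inferred from the requested return type, e.g. (sketch):
--
-- @
-- bits = bitcast floats :: Tensor Build Data.Int.Int32
-- @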
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] type') => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build type' -- ^ __output__ bitcast' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Bitcast" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "type" .~ tensorType (undefined :: type') & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "type" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_UINT32 type: DT_UINT64 type: DT_INT8 type: DT_INT16 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } attr { name: "type" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT64 type: DT_INT32 type: DT_UINT8 type: DT_UINT16 type: DT_UINT32 type: DT_UINT64 type: DT_INT8 type: DT_INT16 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT16 type: DT_QUINT16 type: DT_QINT32 } } } -} -- | bitwiseAnd :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ bitwiseAnd = bitwiseAnd' id bitwiseAnd' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ bitwiseAnd' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "BitwiseAnd" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_UINT16 type: DT_UINT32 type: DT_UINT64 } } } -} -- | bitwiseOr :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ bitwiseOr = bitwiseOr' id bitwiseOr' :: forall v'1 v'2 t . 
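-- E.g. masking the low nibble with 'bitwiseAnd' above (sketch, assuming
-- 'TF.scalar' from TensorFlow.Ops; the op broadcasts like other cwise ops):
--
-- @
-- lowNibbles = bitwiseAnd xs (TF.scalar (0x0f :: Data.Word.Word8))
-- @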
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ bitwiseOr' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "BitwiseOr" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_UINT16 type: DT_UINT32 type: DT_UINT64 } } } -} -- | bitwiseXor :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ bitwiseXor = bitwiseXor' id bitwiseXor' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ bitwiseXor' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "BitwiseXor" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_UINT16 type: DT_UINT32 type: DT_UINT64 } } } -} -- | boostedTreesAggregateStats :: Data.Int.Int64 -- ^ __max_splits__ -> Data.Int.Int64 -- ^ __num_buckets__ -> Tensor v'1 Data.Int.Int32 -- ^ __node_ids__ -> Tensor v'2 Float -- ^ __gradients__ -> Tensor v'3 Float -- ^ __hessians__ -> Tensor v'4 Data.Int.Int32 -- ^ __feature__ -> Tensor Build Float -- ^ __stats_summary__ boostedTreesAggregateStats = boostedTreesAggregateStats' id boostedTreesAggregateStats' :: OpParams -> Data.Int.Int64 -- ^ __max_splits__ -> Data.Int.Int64 -- ^ __num_buckets__ -> Tensor v'1 Data.Int.Int32 -- ^ __node_ids__ -> Tensor v'2 Float -- ^ __gradients__ -> Tensor v'3 Float -- ^ __hessians__ -> Tensor v'4 Data.Int.Int32 -- ^ __feature__ -> Tensor Build Float -- ^ __stats_summary__ boostedTreesAggregateStats' op'options max_splits num_buckets node_ids gradients hessians feature | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs node_ids, buildInputs gradients, buildInputs hessians, buildInputs feature] return (opDef "BoostedTreesAggregateStats" & opAttr "max_splits" .~ max_splits & opAttr "num_buckets" .~ num_buckets & op'options & opInputs .~ op'inputs) {- input_arg { name: "node_ids" type: DT_INT32 } input_arg { name: "gradients" type: DT_FLOAT } input_arg { name: "hessians" type: DT_FLOAT } input_arg { name: "feature" type: DT_INT32 } output_arg { name: "stats_summary" type: DT_FLOAT } attr { name: "max_splits" type: "int" has_minimum: true minimum: 1 } attr { name: "num_buckets" type: "int" has_minimum: true minimum: 1 } -} -- | boostedTreesBucketize :: [Tensor v'1 Float] -- ^ __float_values__ -> [Tensor v'2 Float] -- 
^ __bucket_boundaries__ -> [Tensor Build Data.Int.Int32] -- ^ __buckets__ boostedTreesBucketize = boostedTreesBucketize' id boostedTreesBucketize' :: OpParams -> [Tensor v'1 Float] -- ^ __float_values__ -> [Tensor v'2 Float] -- ^ __bucket_boundaries__ -> [Tensor Build Data.Int.Int32] -- ^ __buckets__ boostedTreesBucketize' op'options float_values bucket_boundaries | eqLengthGuard [("num_features", [("float_values", length float_values), ("bucket_boundaries", length bucket_boundaries)])] = pureOp [num_features] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs float_values, buildInputs bucket_boundaries] return (opDef "BoostedTreesBucketize" & opAttr "num_features" .~ num_features & op'options & opInputs .~ op'inputs) where num_features = fromIntegral (length float_values) :: Int64 {- input_arg { name: "float_values" type: DT_FLOAT number_attr: "num_features" } input_arg { name: "bucket_boundaries" type: DT_FLOAT number_attr: "num_features" } output_arg { name: "buckets" type: DT_INT32 number_attr: "num_features" } attr { name: "num_features" type: "int" has_minimum: true } -} -- | boostedTreesCalculateBestFeatureSplit :: Data.Int.Int64 -- ^ __logits_dimension__ -> Tensor v'1 Data.Int.Int32 -- ^ __node_id_range__ -> Tensor v'2 Float -- ^ __stats_summary__ -> Tensor v'3 Float -- ^ __l1__ -> Tensor v'4 Float -- ^ __l2__ -> Tensor v'5 Float -- ^ __tree_complexity__ -> Tensor v'6 Float -- ^ __min_node_weight__ -> (Tensor Build Data.Int.Int32, Tensor Build Float, Tensor Build Data.Int.Int32, Tensor Build Data.Int.Int32, Tensor Build Float, Tensor Build Float, Tensor Build Data.ByteString.ByteString) -- ^ (__node_ids__, __gains__, __feature_dimensions__, __thresholds__, __left_node_contribs__, __right_node_contribs__, __split_with_default_directions__) -- -- * __node_ids__ -- -- * __gains__ -- -- * __feature_dimensions__ -- -- * __thresholds__ -- -- * __left_node_contribs__ -- -- * __right_node_contribs__ -- -- * __split_with_default_directions__ boostedTreesCalculateBestFeatureSplit = boostedTreesCalculateBestFeatureSplit' id boostedTreesCalculateBestFeatureSplit' :: OpParams -> Data.Int.Int64 -- ^ __logits_dimension__ -> Tensor v'1 Data.Int.Int32 -- ^ __node_id_range__ -> Tensor v'2 Float -- ^ __stats_summary__ -> Tensor v'3 Float -- ^ __l1__ -> Tensor v'4 Float -- ^ __l2__ -> Tensor v'5 Float -- ^ __tree_complexity__ -> Tensor v'6 Float -- ^ __min_node_weight__ -> (Tensor Build Data.Int.Int32, Tensor Build Float, Tensor Build Data.Int.Int32, Tensor Build Data.Int.Int32, Tensor Build Float, Tensor Build Float, Tensor Build Data.ByteString.ByteString) -- ^ (__node_ids__, __gains__, __feature_dimensions__, __thresholds__, __left_node_contribs__, __right_node_contribs__, __split_with_default_directions__) -- -- * __node_ids__ -- -- * __gains__ -- -- * __feature_dimensions__ -- -- * __thresholds__ -- -- * __left_node_contribs__ -- -- * __right_node_contribs__ -- -- * __split_with_default_directions__ boostedTreesCalculateBestFeatureSplit' op'options logits_dimension node_id_range stats_summary l1 l2 tree_complexity min_node_weight | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs node_id_range, buildInputs stats_summary, buildInputs l1, buildInputs l2, buildInputs tree_complexity, buildInputs min_node_weight] return (opDef "BoostedTreesCalculateBestFeatureSplit" & opAttr "logits_dimension" .~ logits_dimension & op'options & opInputs .~ op'inputs) {- input_arg { name: "node_id_range" type: DT_INT32 } input_arg { name: 
"stats_summary" type: DT_FLOAT } input_arg { name: "l1" type: DT_FLOAT } input_arg { name: "l2" type: DT_FLOAT } input_arg { name: "tree_complexity" type: DT_FLOAT } input_arg { name: "min_node_weight" type: DT_FLOAT } output_arg { name: "node_ids" type: DT_INT32 } output_arg { name: "gains" type: DT_FLOAT } output_arg { name: "feature_dimensions" type: DT_INT32 } output_arg { name: "thresholds" type: DT_INT32 } output_arg { name: "left_node_contribs" type: DT_FLOAT } output_arg { name: "right_node_contribs" type: DT_FLOAT } output_arg { name: "split_with_default_directions" type: DT_STRING } attr { name: "logits_dimension" type: "int" has_minimum: true minimum: 1 } attr { name: "split_type" type: "string" default_value { s: "inequality" } allowed_values { list { s: "inequality" } } } -} -- | boostedTreesCalculateBestGainsPerFeature :: Data.Int.Int64 -- ^ __max_splits__ -> Tensor v'1 Data.Int.Int32 -- ^ __node_id_range__ -> [Tensor v'2 Float] -- ^ __stats_summary_list__ -> Tensor v'3 Float -- ^ __l1__ -> Tensor v'4 Float -- ^ __l2__ -> Tensor v'5 Float -- ^ __tree_complexity__ -> Tensor v'6 Float -- ^ __min_node_weight__ -> ([Tensor Build Data.Int.Int32], [Tensor Build Float], [Tensor Build Data.Int.Int32], [Tensor Build Float], [Tensor Build Float]) -- ^ (__node_ids_list__, __gains_list__, __thresholds_list__, __left_node_contribs_list__, __right_node_contribs_list__) -- -- * __node_ids_list__ -- -- * __gains_list__ -- -- * __thresholds_list__ -- -- * __left_node_contribs_list__ -- -- * __right_node_contribs_list__ boostedTreesCalculateBestGainsPerFeature = boostedTreesCalculateBestGainsPerFeature' id boostedTreesCalculateBestGainsPerFeature' :: OpParams -> Data.Int.Int64 -- ^ __max_splits__ -> Tensor v'1 Data.Int.Int32 -- ^ __node_id_range__ -> [Tensor v'2 Float] -- ^ __stats_summary_list__ -> Tensor v'3 Float -- ^ __l1__ -> Tensor v'4 Float -- ^ __l2__ -> Tensor v'5 Float -- ^ __tree_complexity__ -> Tensor v'6 Float -- ^ __min_node_weight__ -> ([Tensor Build Data.Int.Int32], [Tensor Build Float], [Tensor Build Data.Int.Int32], [Tensor Build Float], [Tensor Build Float]) -- ^ (__node_ids_list__, __gains_list__, __thresholds_list__, __left_node_contribs_list__, __right_node_contribs_list__) -- -- * __node_ids_list__ -- -- * __gains_list__ -- -- * __thresholds_list__ -- -- * __left_node_contribs_list__ -- -- * __right_node_contribs_list__ boostedTreesCalculateBestGainsPerFeature' op'options max_splits node_id_range stats_summary_list l1 l2 tree_complexity min_node_weight | eqLengthGuard [("num_features", [("stats_summary_list", length stats_summary_list)])] = pureOp [num_features, num_features, num_features, num_features, num_features] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs node_id_range, buildInputs stats_summary_list, buildInputs l1, buildInputs l2, buildInputs tree_complexity, buildInputs min_node_weight] return (opDef "BoostedTreesCalculateBestGainsPerFeature" & opAttr "max_splits" .~ max_splits & opAttr "num_features" .~ num_features & op'options & opInputs .~ op'inputs) where num_features = fromIntegral (length stats_summary_list) :: Int64 {- input_arg { name: "node_id_range" type: DT_INT32 } input_arg { name: "stats_summary_list" type: DT_FLOAT number_attr: "num_features" } input_arg { name: "l1" type: DT_FLOAT } input_arg { name: "l2" type: DT_FLOAT } input_arg { name: "tree_complexity" type: DT_FLOAT } input_arg { name: "min_node_weight" type: DT_FLOAT } output_arg { name: "node_ids_list" type: DT_INT32 number_attr: "num_features" } output_arg { 
name: "gains_list" type: DT_FLOAT number_attr: "num_features" } output_arg { name: "thresholds_list" type: DT_INT32 number_attr: "num_features" } output_arg { name: "left_node_contribs_list" type: DT_FLOAT number_attr: "num_features" } output_arg { name: "right_node_contribs_list" type: DT_FLOAT number_attr: "num_features" } attr { name: "max_splits" type: "int" has_minimum: true minimum: 1 } attr { name: "num_features" type: "int" has_minimum: true minimum: 1 } -} -- | boostedTreesCenterBias :: forall v'1 v'2 v'3 v'4 v'5 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __tree_ensemble_handle__ -> Tensor v'2 Float -- ^ __mean_gradients__ -> Tensor v'3 Float -- ^ __mean_hessians__ -> Tensor v'4 Float -- ^ __l1__ -> Tensor v'5 Float -- ^ __l2__ -> m' (Tensor Value Bool) -- ^ __continue_centering__ boostedTreesCenterBias = boostedTreesCenterBias' id boostedTreesCenterBias' :: forall v'1 v'2 v'3 v'4 v'5 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __tree_ensemble_handle__ -> Tensor v'2 Float -- ^ __mean_gradients__ -> Tensor v'3 Float -- ^ __mean_hessians__ -> Tensor v'4 Float -- ^ __l1__ -> Tensor v'5 Float -- ^ __l2__ -> m' (Tensor Value Bool) -- ^ __continue_centering__ boostedTreesCenterBias' op'options tree_ensemble_handle mean_gradients mean_hessians l1 l2 | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tree_ensemble_handle, buildInputs mean_gradients, buildInputs mean_hessians, buildInputs l1, buildInputs l2] buildOp [] (opDef "BoostedTreesCenterBias" & op'options & opInputs .~ op'inputs) {- input_arg { name: "tree_ensemble_handle" type: DT_RESOURCE } input_arg { name: "mean_gradients" type: DT_FLOAT } input_arg { name: "mean_hessians" type: DT_FLOAT } input_arg { name: "l1" type: DT_FLOAT } input_arg { name: "l2" type: DT_FLOAT } output_arg { name: "continue_centering" type: DT_BOOL } -} -- | boostedTreesCreateEnsemble :: forall v'1 v'2 v'3 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __tree_ensemble_handle__ -> Tensor v'2 Data.Int.Int64 -- ^ __stamp_token__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __tree_ensemble_serialized__ -> m' (ControlNode) boostedTreesCreateEnsemble = boostedTreesCreateEnsemble' id boostedTreesCreateEnsemble' :: forall v'1 v'2 v'3 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __tree_ensemble_handle__ -> Tensor v'2 Data.Int.Int64 -- ^ __stamp_token__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __tree_ensemble_serialized__ -> m' (ControlNode) boostedTreesCreateEnsemble' op'options tree_ensemble_handle stamp_token tree_ensemble_serialized | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tree_ensemble_handle, buildInputs stamp_token, buildInputs tree_ensemble_serialized] buildOp [] (opDef "BoostedTreesCreateEnsemble" & op'options & opInputs .~ op'inputs) {- input_arg { name: "tree_ensemble_handle" type: DT_RESOURCE } input_arg { name: "stamp_token" type: DT_INT64 } input_arg { name: "tree_ensemble_serialized" type: DT_STRING } -} -- | boostedTreesCreateQuantileStreamResource :: forall v'1 v'2 v'3 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __quantile_stream_resource_handle__ -> Tensor v'2 Float -- ^ __epsilon__ -> Tensor v'3 Data.Int.Int64 -- ^ __num_streams__ -> m' (ControlNode) boostedTreesCreateQuantileStreamResource = boostedTreesCreateQuantileStreamResource' id boostedTreesCreateQuantileStreamResource' :: forall v'1 v'2 v'3 m' . 
(MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __quantile_stream_resource_handle__ -> Tensor v'2 Float -- ^ __epsilon__ -> Tensor v'3 Data.Int.Int64 -- ^ __num_streams__ -> m' (ControlNode) boostedTreesCreateQuantileStreamResource' op'options quantile_stream_resource_handle epsilon num_streams | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs quantile_stream_resource_handle, buildInputs epsilon, buildInputs num_streams] buildOp [] (opDef "BoostedTreesCreateQuantileStreamResource" & op'options & opInputs .~ op'inputs) {- input_arg { name: "quantile_stream_resource_handle" type: DT_RESOURCE } input_arg { name: "epsilon" type: DT_FLOAT } input_arg { name: "num_streams" type: DT_INT64 } attr { name: "max_elements" type: "int" default_value { i: 1099511627776 } } -} -- | boostedTreesDeserializeEnsemble :: forall v'1 v'2 v'3 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __tree_ensemble_handle__ -> Tensor v'2 Data.Int.Int64 -- ^ __stamp_token__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __tree_ensemble_serialized__ -> m' (ControlNode) boostedTreesDeserializeEnsemble = boostedTreesDeserializeEnsemble' id boostedTreesDeserializeEnsemble' :: forall v'1 v'2 v'3 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __tree_ensemble_handle__ -> Tensor v'2 Data.Int.Int64 -- ^ __stamp_token__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __tree_ensemble_serialized__ -> m' (ControlNode) boostedTreesDeserializeEnsemble' op'options tree_ensemble_handle stamp_token tree_ensemble_serialized | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tree_ensemble_handle, buildInputs stamp_token, buildInputs tree_ensemble_serialized] buildOp [] (opDef "BoostedTreesDeserializeEnsemble" & op'options & opInputs .~ op'inputs) {- input_arg { name: "tree_ensemble_handle" type: DT_RESOURCE } input_arg { name: "stamp_token" type: DT_INT64 } input_arg { name: "tree_ensemble_serialized" type: DT_STRING } -} -- | boostedTreesEnsembleResourceHandleOp :: forall m' . (MonadBuild m') => m' (Tensor Value ResourceHandle) -- ^ __resource__ boostedTreesEnsembleResourceHandleOp = boostedTreesEnsembleResourceHandleOp' id boostedTreesEnsembleResourceHandleOp' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Value ResourceHandle) -- ^ __resource__ boostedTreesEnsembleResourceHandleOp' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "BoostedTreesEnsembleResourceHandleOp" & op'options & opInputs .~ op'inputs) {- output_arg { name: "resource" type: DT_RESOURCE } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | boostedTreesExampleDebugOutputs :: forall v'1 v'2 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __logits_dimension__ -> Tensor v'1 ResourceHandle -- ^ __tree_ensemble_handle__ -> [Tensor v'2 Data.Int.Int32] -- ^ __bucketized_features__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __examples_debug_outputs_serialized__ boostedTreesExampleDebugOutputs = boostedTreesExampleDebugOutputs' id boostedTreesExampleDebugOutputs' :: forall v'1 v'2 m' . 
(MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __logits_dimension__ -> Tensor v'1 ResourceHandle -- ^ __tree_ensemble_handle__ -> [Tensor v'2 Data.Int.Int32] -- ^ __bucketized_features__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __examples_debug_outputs_serialized__ boostedTreesExampleDebugOutputs' op'options logits_dimension tree_ensemble_handle bucketized_features | eqLengthGuard [("num_bucketized_features", [("bucketized_features", length bucketized_features)])] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tree_ensemble_handle, buildInputs bucketized_features] buildOp [] (opDef "BoostedTreesExampleDebugOutputs" & opAttr "logits_dimension" .~ logits_dimension & opAttr "num_bucketized_features" .~ num_bucketized_features & op'options & opInputs .~ op'inputs) where num_bucketized_features = fromIntegral (length bucketized_features) :: Int64 {- input_arg { name: "tree_ensemble_handle" type: DT_RESOURCE } input_arg { name: "bucketized_features" type: DT_INT32 number_attr: "num_bucketized_features" } output_arg { name: "examples_debug_outputs_serialized" type: DT_STRING } attr { name: "num_bucketized_features" type: "int" has_minimum: true minimum: 1 } attr { name: "logits_dimension" type: "int" } -} -- | boostedTreesGetEnsembleStates :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __tree_ensemble_handle__ -> m' ((Tensor Value Data.Int.Int64, Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int32)) -- ^ (__stamp_token__, __num_trees__, __num_finalized_trees__, __num_attempted_layers__, __last_layer_nodes_range__) -- -- * __stamp_token__ -- -- * __num_trees__ -- -- * __num_finalized_trees__ -- -- * __num_attempted_layers__ -- -- * __last_layer_nodes_range__ boostedTreesGetEnsembleStates = boostedTreesGetEnsembleStates' id boostedTreesGetEnsembleStates' :: forall v'1 m' . 
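-- Fetching one of the five state outputs above (sketch; 'TF.Scalar' is the
-- fetch type for 0-d tensors in the hand-written API):
--
-- @
-- (_, numTrees, _, _, _) <- boostedTreesGetEnsembleStates handle
-- n <- TF.run numTrees :: TF.Session (TF.Scalar Data.Int.Int32)
-- @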
(MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __tree_ensemble_handle__ -> m' ((Tensor Value Data.Int.Int64, Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int32)) -- ^ (__stamp_token__, __num_trees__, __num_finalized_trees__, __num_attempted_layers__, __last_layer_nodes_range__) -- -- * __stamp_token__ -- -- * __num_trees__ -- -- * __num_finalized_trees__ -- -- * __num_attempted_layers__ -- -- * __last_layer_nodes_range__ boostedTreesGetEnsembleStates' op'options tree_ensemble_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tree_ensemble_handle] buildOp [] (opDef "BoostedTreesGetEnsembleStates" & op'options & opInputs .~ op'inputs) {- input_arg { name: "tree_ensemble_handle" type: DT_RESOURCE } output_arg { name: "stamp_token" type: DT_INT64 } output_arg { name: "num_trees" type: DT_INT32 } output_arg { name: "num_finalized_trees" type: DT_INT32 } output_arg { name: "num_attempted_layers" type: DT_INT32 } output_arg { name: "last_layer_nodes_range" type: DT_INT32 } -} -- | boostedTreesMakeQuantileSummaries :: [Tensor v'1 Float] -- ^ __float_values__ -> Tensor v'2 Float -- ^ __example_weights__ -> Tensor v'3 Float -- ^ __epsilon__ -> [Tensor Build Float] -- ^ __summaries__ boostedTreesMakeQuantileSummaries = boostedTreesMakeQuantileSummaries' id boostedTreesMakeQuantileSummaries' :: OpParams -> [Tensor v'1 Float] -- ^ __float_values__ -> Tensor v'2 Float -- ^ __example_weights__ -> Tensor v'3 Float -- ^ __epsilon__ -> [Tensor Build Float] -- ^ __summaries__ boostedTreesMakeQuantileSummaries' op'options float_values example_weights epsilon | eqLengthGuard [("num_features", [("float_values", length float_values)])] = pureOp [num_features] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs float_values, buildInputs example_weights, buildInputs epsilon] return (opDef "BoostedTreesMakeQuantileSummaries" & opAttr "num_features" .~ num_features & op'options & opInputs .~ op'inputs) where num_features = fromIntegral (length float_values) :: Int64 {- input_arg { name: "float_values" type: DT_FLOAT number_attr: "num_features" } input_arg { name: "example_weights" type: DT_FLOAT } input_arg { name: "epsilon" type: DT_FLOAT } output_arg { name: "summaries" type: DT_FLOAT number_attr: "num_features" } attr { name: "num_features" type: "int" has_minimum: true } -} -- | boostedTreesMakeStatsSummary :: Data.Int.Int64 -- ^ __max_splits__ -> Data.Int.Int64 -- ^ __num_buckets__ -> Tensor v'1 Data.Int.Int32 -- ^ __node_ids__ -> Tensor v'2 Float -- ^ __gradients__ -> Tensor v'3 Float -- ^ __hessians__ -> [Tensor v'4 Data.Int.Int32] -- ^ __bucketized_features_list__ -> Tensor Build Float -- ^ __stats_summary__ boostedTreesMakeStatsSummary = boostedTreesMakeStatsSummary' id boostedTreesMakeStatsSummary' :: OpParams -> Data.Int.Int64 -- ^ __max_splits__ -> Data.Int.Int64 -- ^ __num_buckets__ -> Tensor v'1 Data.Int.Int32 -- ^ __node_ids__ -> Tensor v'2 Float -- ^ __gradients__ -> Tensor v'3 Float -- ^ __hessians__ -> [Tensor v'4 Data.Int.Int32] -- ^ __bucketized_features_list__ -> Tensor Build Float -- ^ __stats_summary__ boostedTreesMakeStatsSummary' op'options max_splits num_buckets node_ids gradients hessians bucketized_features_list | eqLengthGuard [("num_features", [("bucketized_features_list", length bucketized_features_list)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs node_ids, buildInputs gradients, buildInputs 
hessians, buildInputs bucketized_features_list] return (opDef "BoostedTreesMakeStatsSummary" & opAttr "max_splits" .~ max_splits & opAttr "num_buckets" .~ num_buckets & opAttr "num_features" .~ num_features & op'options & opInputs .~ op'inputs) where num_features = fromIntegral (length bucketized_features_list) :: Int64 {- input_arg { name: "node_ids" type: DT_INT32 } input_arg { name: "gradients" type: DT_FLOAT } input_arg { name: "hessians" type: DT_FLOAT } input_arg { name: "bucketized_features_list" type: DT_INT32 number_attr: "num_features" } output_arg { name: "stats_summary" type: DT_FLOAT } attr { name: "max_splits" type: "int" has_minimum: true minimum: 1 } attr { name: "num_buckets" type: "int" has_minimum: true minimum: 1 } attr { name: "num_features" type: "int" has_minimum: true minimum: 1 } -} -- | boostedTreesPredict :: forall v'1 v'2 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __logits_dimension__ -> Tensor v'1 ResourceHandle -- ^ __tree_ensemble_handle__ -> [Tensor v'2 Data.Int.Int32] -- ^ __bucketized_features__ -> m' (Tensor Value Float) -- ^ __logits__ boostedTreesPredict = boostedTreesPredict' id boostedTreesPredict' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __logits_dimension__ -> Tensor v'1 ResourceHandle -- ^ __tree_ensemble_handle__ -> [Tensor v'2 Data.Int.Int32] -- ^ __bucketized_features__ -> m' (Tensor Value Float) -- ^ __logits__ boostedTreesPredict' op'options logits_dimension tree_ensemble_handle bucketized_features | eqLengthGuard [("num_bucketized_features", [("bucketized_features", length bucketized_features)])] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tree_ensemble_handle, buildInputs bucketized_features] buildOp [] (opDef "BoostedTreesPredict" & opAttr "logits_dimension" .~ logits_dimension & opAttr "num_bucketized_features" .~ num_bucketized_features & op'options & opInputs .~ op'inputs) where num_bucketized_features = fromIntegral (length bucketized_features) :: Int64 {- input_arg { name: "tree_ensemble_handle" type: DT_RESOURCE } input_arg { name: "bucketized_features" type: DT_INT32 number_attr: "num_bucketized_features" } output_arg { name: "logits" type: DT_FLOAT } attr { name: "num_bucketized_features" type: "int" has_minimum: true minimum: 1 } attr { name: "logits_dimension" type: "int" } -} -- | boostedTreesQuantileStreamResourceAddSummaries :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __quantile_stream_resource_handle__ -> [Tensor v'2 Float] -- ^ __summaries__ -> m' (ControlNode) boostedTreesQuantileStreamResourceAddSummaries = boostedTreesQuantileStreamResourceAddSummaries' id boostedTreesQuantileStreamResourceAddSummaries' :: forall v'1 v'2 m' . 
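-- Make-then-add sketch for quantile summaries: the pure summaries from
-- 'boostedTreesMakeQuantileSummaries' above feed the stateful add op
-- (assumes 'TF.run_' from the hand-written API):
--
-- @
-- let summaries = boostedTreesMakeQuantileSummaries floats weights epsilon
-- addOp <- boostedTreesQuantileStreamResourceAddSummaries handle summaries
-- TF.run_ addOp
-- @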
(MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __quantile_stream_resource_handle__ -> [Tensor v'2 Float] -- ^ __summaries__ -> m' (ControlNode) boostedTreesQuantileStreamResourceAddSummaries' op'options quantile_stream_resource_handle summaries | eqLengthGuard [("num_features", [("summaries", length summaries)])] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs quantile_stream_resource_handle, buildInputs summaries] buildOp [] (opDef "BoostedTreesQuantileStreamResourceAddSummaries" & opAttr "num_features" .~ num_features & op'options & opInputs .~ op'inputs) where num_features = fromIntegral (length summaries) :: Int64 {- input_arg { name: "quantile_stream_resource_handle" type: DT_RESOURCE } input_arg { name: "summaries" type: DT_FLOAT number_attr: "num_features" } attr { name: "num_features" type: "int" has_minimum: true } -} -- | boostedTreesQuantileStreamResourceDeserialize :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __quantile_stream_resource_handle__ -> [Tensor v'2 Float] -- ^ __bucket_boundaries__ -> m' (ControlNode) boostedTreesQuantileStreamResourceDeserialize = boostedTreesQuantileStreamResourceDeserialize' id boostedTreesQuantileStreamResourceDeserialize' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __quantile_stream_resource_handle__ -> [Tensor v'2 Float] -- ^ __bucket_boundaries__ -> m' (ControlNode) boostedTreesQuantileStreamResourceDeserialize' op'options quantile_stream_resource_handle bucket_boundaries | eqLengthGuard [("num_streams", [("bucket_boundaries", length bucket_boundaries)])] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs quantile_stream_resource_handle, buildInputs bucket_boundaries] buildOp [] (opDef "BoostedTreesQuantileStreamResourceDeserialize" & opAttr "num_streams" .~ num_streams & op'options & opInputs .~ op'inputs) where num_streams = fromIntegral (length bucket_boundaries) :: Int64 {- input_arg { name: "quantile_stream_resource_handle" type: DT_RESOURCE } input_arg { name: "bucket_boundaries" type: DT_FLOAT number_attr: "num_streams" } attr { name: "num_streams" type: "int" has_minimum: true minimum: 1 } -} -- | boostedTreesQuantileStreamResourceFlush :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __quantile_stream_resource_handle__ -> Tensor v'2 Data.Int.Int64 -- ^ __num_buckets__ -> m' (ControlNode) boostedTreesQuantileStreamResourceFlush = boostedTreesQuantileStreamResourceFlush' id boostedTreesQuantileStreamResourceFlush' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __quantile_stream_resource_handle__ -> Tensor v'2 Data.Int.Int64 -- ^ __num_buckets__ -> m' (ControlNode) boostedTreesQuantileStreamResourceFlush' op'options quantile_stream_resource_handle num_buckets | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs quantile_stream_resource_handle, buildInputs num_buckets] buildOp [] (opDef "BoostedTreesQuantileStreamResourceFlush" & op'options & opInputs .~ op'inputs) {- input_arg { name: "quantile_stream_resource_handle" type: DT_RESOURCE } input_arg { name: "num_buckets" type: DT_INT64 } attr { name: "generate_quantiles" type: "bool" default_value { b: false } } -} -- | boostedTreesQuantileStreamResourceGetBucketBoundaries :: forall v'1 m' . 
(MonadBuild m') => Data.Int.Int64 -- ^ __num_features__ -> Tensor v'1 ResourceHandle -- ^ __quantile_stream_resource_handle__ -> m' ([Tensor Value Float]) -- ^ __bucket_boundaries__ boostedTreesQuantileStreamResourceGetBucketBoundaries = boostedTreesQuantileStreamResourceGetBucketBoundaries' id boostedTreesQuantileStreamResourceGetBucketBoundaries' :: forall v'1 m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_features__ -> Tensor v'1 ResourceHandle -- ^ __quantile_stream_resource_handle__ -> m' ([Tensor Value Float]) -- ^ __bucket_boundaries__ boostedTreesQuantileStreamResourceGetBucketBoundaries' op'options num_features quantile_stream_resource_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs quantile_stream_resource_handle] buildOp [num_features] (opDef "BoostedTreesQuantileStreamResourceGetBucketBoundaries" & opAttr "num_features" .~ num_features & op'options & opInputs .~ op'inputs) {- input_arg { name: "quantile_stream_resource_handle" type: DT_RESOURCE } output_arg { name: "bucket_boundaries" type: DT_FLOAT number_attr: "num_features" } attr { name: "num_features" type: "int" has_minimum: true } -} -- | boostedTreesQuantileStreamResourceHandleOp :: forall m' . (MonadBuild m') => m' (Tensor Value ResourceHandle) -- ^ __resource__ boostedTreesQuantileStreamResourceHandleOp = boostedTreesQuantileStreamResourceHandleOp' id boostedTreesQuantileStreamResourceHandleOp' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Value ResourceHandle) -- ^ __resource__ boostedTreesQuantileStreamResourceHandleOp' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "BoostedTreesQuantileStreamResourceHandleOp" & op'options & opInputs .~ op'inputs) {- output_arg { name: "resource" type: DT_RESOURCE } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | boostedTreesSerializeEnsemble :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __tree_ensemble_handle__ -> m' ((Tensor Value Data.Int.Int64, Tensor Value Data.ByteString.ByteString)) -- ^ (__stamp_token__, __tree_ensemble_serialized__) -- -- * __stamp_token__ -- -- * __tree_ensemble_serialized__ boostedTreesSerializeEnsemble = boostedTreesSerializeEnsemble' id boostedTreesSerializeEnsemble' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __tree_ensemble_handle__ -> m' ((Tensor Value Data.Int.Int64, Tensor Value Data.ByteString.ByteString)) -- ^ (__stamp_token__, __tree_ensemble_serialized__) -- -- * __stamp_token__ -- -- * __tree_ensemble_serialized__ boostedTreesSerializeEnsemble' op'options tree_ensemble_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tree_ensemble_handle] buildOp [] (opDef "BoostedTreesSerializeEnsemble" & op'options & opInputs .~ op'inputs) {- input_arg { name: "tree_ensemble_handle" type: DT_RESOURCE } output_arg { name: "stamp_token" type: DT_INT64 } output_arg { name: "tree_ensemble_serialized" type: DT_STRING } -} -- | boostedTreesTrainingPredict :: forall v'1 v'2 v'3 v'4 m' . 
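-- Note: boostedTreesQuantileStreamResourceGetBucketBoundaries above passes
-- @[num_features]@ to 'buildOp'; that is how the generated code requests a
-- list-valued output. The "bucket_boundaries" output carries
-- number_attr "num_features", so the caller-supplied count determines how
-- many Tensor Value Float handles come back in the list.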
(MonadBuild m') => Data.Int.Int64 -- ^ __logits_dimension__ -> Tensor v'1 ResourceHandle -- ^ __tree_ensemble_handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __cached_tree_ids__ -> Tensor v'3 Data.Int.Int32 -- ^ __cached_node_ids__ -> [Tensor v'4 Data.Int.Int32] -- ^ __bucketized_features__ -> m' ((Tensor Value Float, Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int32)) -- ^ (__partial_logits__, __tree_ids__, __node_ids__) -- -- * __partial_logits__ -- -- * __tree_ids__ -- -- * __node_ids__ boostedTreesTrainingPredict = boostedTreesTrainingPredict' id boostedTreesTrainingPredict' :: forall v'1 v'2 v'3 v'4 m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __logits_dimension__ -> Tensor v'1 ResourceHandle -- ^ __tree_ensemble_handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __cached_tree_ids__ -> Tensor v'3 Data.Int.Int32 -- ^ __cached_node_ids__ -> [Tensor v'4 Data.Int.Int32] -- ^ __bucketized_features__ -> m' ((Tensor Value Float, Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int32)) -- ^ (__partial_logits__, __tree_ids__, __node_ids__) -- -- * __partial_logits__ -- -- * __tree_ids__ -- -- * __node_ids__ boostedTreesTrainingPredict' op'options logits_dimension tree_ensemble_handle cached_tree_ids cached_node_ids bucketized_features | eqLengthGuard [("num_bucketized_features", [("bucketized_features", length bucketized_features)])] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tree_ensemble_handle, buildInputs cached_tree_ids, buildInputs cached_node_ids, buildInputs bucketized_features] buildOp [] (opDef "BoostedTreesTrainingPredict" & opAttr "logits_dimension" .~ logits_dimension & opAttr "num_bucketized_features" .~ num_bucketized_features & op'options & opInputs .~ op'inputs) where num_bucketized_features = fromIntegral (length bucketized_features) :: Int64 {- input_arg { name: "tree_ensemble_handle" type: DT_RESOURCE } input_arg { name: "cached_tree_ids" type: DT_INT32 } input_arg { name: "cached_node_ids" type: DT_INT32 } input_arg { name: "bucketized_features" type: DT_INT32 number_attr: "num_bucketized_features" } output_arg { name: "partial_logits" type: DT_FLOAT } output_arg { name: "tree_ids" type: DT_INT32 } output_arg { name: "node_ids" type: DT_INT32 } attr { name: "num_bucketized_features" type: "int" has_minimum: true minimum: 1 } attr { name: "logits_dimension" type: "int" } -} -- | boostedTreesUpdateEnsemble :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __pruning_mode__ -> Tensor v'1 ResourceHandle -- ^ __tree_ensemble_handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __feature_ids__ -> [Tensor v'3 Data.Int.Int32] -- ^ __node_ids__ -> [Tensor v'4 Float] -- ^ __gains__ -> [Tensor v'5 Data.Int.Int32] -- ^ __thresholds__ -> [Tensor v'6 Float] -- ^ __left_node_contribs__ -> [Tensor v'7 Float] -- ^ __right_node_contribs__ -> Tensor v'8 Data.Int.Int32 -- ^ __max_depth__ -> Tensor v'9 Float -- ^ __learning_rate__ -> m' (ControlNode) boostedTreesUpdateEnsemble = boostedTreesUpdateEnsemble' id boostedTreesUpdateEnsemble' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 m' . 
(MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __pruning_mode__ -> Tensor v'1 ResourceHandle -- ^ __tree_ensemble_handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __feature_ids__ -> [Tensor v'3 Data.Int.Int32] -- ^ __node_ids__ -> [Tensor v'4 Float] -- ^ __gains__ -> [Tensor v'5 Data.Int.Int32] -- ^ __thresholds__ -> [Tensor v'6 Float] -- ^ __left_node_contribs__ -> [Tensor v'7 Float] -- ^ __right_node_contribs__ -> Tensor v'8 Data.Int.Int32 -- ^ __max_depth__ -> Tensor v'9 Float -- ^ __learning_rate__ -> m' (ControlNode) boostedTreesUpdateEnsemble' op'options pruning_mode tree_ensemble_handle feature_ids node_ids gains thresholds left_node_contribs right_node_contribs max_depth learning_rate | eqLengthGuard [("num_features", [("node_ids", length node_ids), ("gains", length gains), ("thresholds", length thresholds), ("left_node_contribs", length left_node_contribs), ("right_node_contribs", length right_node_contribs)])] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tree_ensemble_handle, buildInputs feature_ids, buildInputs node_ids, buildInputs gains, buildInputs thresholds, buildInputs left_node_contribs, buildInputs right_node_contribs, buildInputs max_depth, buildInputs learning_rate] buildOp [] (opDef "BoostedTreesUpdateEnsemble" & opAttr "pruning_mode" .~ pruning_mode & opAttr "num_features" .~ num_features & op'options & opInputs .~ op'inputs) where num_features = fromIntegral (length node_ids) :: Int64 {- input_arg { name: "tree_ensemble_handle" type: DT_RESOURCE } input_arg { name: "feature_ids" type: DT_INT32 } input_arg { name: "node_ids" type: DT_INT32 number_attr: "num_features" } input_arg { name: "gains" type: DT_FLOAT number_attr: "num_features" } input_arg { name: "thresholds" type: DT_INT32 number_attr: "num_features" } input_arg { name: "left_node_contribs" type: DT_FLOAT number_attr: "num_features" } input_arg { name: "right_node_contribs" type: DT_FLOAT number_attr: "num_features" } input_arg { name: "max_depth" type: DT_INT32 } input_arg { name: "learning_rate" type: DT_FLOAT } attr { name: "pruning_mode" type: "int" has_minimum: true } attr { name: "num_features" type: "int" has_minimum: true } -} -- | broadcastArgs :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Tensor v'1 t -- ^ __s0__ -> Tensor v'2 t -- ^ __s1__ -> Tensor Build t -- ^ __r0__ broadcastArgs = broadcastArgs' id broadcastArgs' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Tensor v'1 t -- ^ __s0__ -> Tensor v'2 t -- ^ __s1__ -> Tensor Build t -- ^ __r0__ broadcastArgs' op'options s0 s1 | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs s0, buildInputs s1] return (opDef "BroadcastArgs" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "s0" type_attr: "T" } input_arg { name: "s1" type_attr: "T" } output_arg { name: "r0" type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | broadcastGradientArgs :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Tensor v'1 t -- ^ __s0__ -> Tensor v'2 t -- ^ __s1__ -> (Tensor Build t, Tensor Build t) -- ^ (__r0__, __r1__) -- -- * __r0__ -- -- * __r1__ broadcastGradientArgs = broadcastGradientArgs' id broadcastGradientArgs' :: forall v'1 v'2 t . 
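-- A worked example for broadcastArgs, which computes the broadcast of two
-- shape vectors. This is a sketch: the helper name is illustrative, and it
-- assumes @import qualified Data.Vector as V@, @import qualified
-- TensorFlow.Ops as TF@ (for TF.constant) and @import qualified
-- TensorFlow.Session as TF@ (for TF.run / TF.Session):
--
-- > broadcastShape :: TF.Session (V.Vector Data.Int.Int32)
-- > broadcastShape = TF.run $ broadcastArgs s0 s1
-- >   where s0 = TF.constant (Shape [3]) [2, 1, 3 :: Data.Int.Int32]
-- >         s1 = TF.constant (Shape [3]) [1, 7, 3]
-- >         -- fetches [2,7,3], the broadcast of shapes [2,1,3] and [1,7,3]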
(OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Tensor v'1 t -- ^ __s0__ -> Tensor v'2 t -- ^ __s1__ -> (Tensor Build t, Tensor Build t) -- ^ (__r0__, __r1__) -- -- * __r0__ -- -- * __r1__ broadcastGradientArgs' op'options s0 s1 | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs s0, buildInputs s1] return (opDef "BroadcastGradientArgs" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "s0" type_attr: "T" } input_arg { name: "s1" type_attr: "T" } output_arg { name: "r0" type_attr: "T" } output_arg { name: "r1" type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | broadcastTo :: forall v'1 v'2 t tidx . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 tidx -- ^ __shape__ -> Tensor Build t -- ^ __output__ broadcastTo = broadcastTo' id broadcastTo' :: forall v'1 v'2 t tidx . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tidx -- ^ __shape__ -> Tensor Build t -- ^ __output__ broadcastTo' op'options input shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs shape] return (opDef "BroadcastTo" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "shape" type_attr: "Tidx" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | bucketize :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build Data.Int.Int32 -- ^ __output__ bucketize = bucketize' id bucketize' :: forall v'1 t . 
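-- Example for broadcastTo (a sketch under the same assumed imports as the
-- broadcastArgs example above). Note also that bucketize, just below, keeps
-- its required "boundaries" list(float) attribute out of the signature; per
-- the OpDef dump it has no default, so it must come in through OpParams.
--
-- > tiled :: TF.Session (V.Vector Float)
-- > tiled = TF.run $ broadcastTo row target
-- >   where row    = TF.constant (Shape [1, 3]) [1, 2, 3 :: Float]
-- >         target = TF.constant (Shape [2]) [2, 3 :: Data.Int.Int32]
-- >         -- fetches [1,2,3,1,2,3]: the 1x3 row broadcast to shape [2,3]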
(OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build Data.Int.Int32 -- ^ __output__ bucketize' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Bucketize" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type: DT_INT32 } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "boundaries" type: "list(float)" } -} -- | cTCBeamSearchDecoder :: Data.Int.Int64 -- ^ __beam_width__ -> Data.Int.Int64 -- ^ __top_paths__ -> Tensor v'1 Float -- ^ __inputs__ -> Tensor v'2 Data.Int.Int32 -- ^ __sequence_length__ -> ([Tensor Build Data.Int.Int64], [Tensor Build Data.Int.Int64], [Tensor Build Data.Int.Int64], Tensor Build Float) -- ^ (__decoded_indices__, __decoded_values__, __decoded_shape__, __log_probability__) -- -- * __decoded_indices__ -- -- * __decoded_values__ -- -- * __decoded_shape__ -- -- * __log_probability__ cTCBeamSearchDecoder = cTCBeamSearchDecoder' id cTCBeamSearchDecoder' :: OpParams -> Data.Int.Int64 -- ^ __beam_width__ -> Data.Int.Int64 -- ^ __top_paths__ -> Tensor v'1 Float -- ^ __inputs__ -> Tensor v'2 Data.Int.Int32 -- ^ __sequence_length__ -> ([Tensor Build Data.Int.Int64], [Tensor Build Data.Int.Int64], [Tensor Build Data.Int.Int64], Tensor Build Float) -- ^ (__decoded_indices__, __decoded_values__, __decoded_shape__, __log_probability__) -- -- * __decoded_indices__ -- -- * __decoded_values__ -- -- * __decoded_shape__ -- -- * __log_probability__ cTCBeamSearchDecoder' op'options beam_width top_paths inputs sequence_length | eqLengthGuard [] = pureOp [top_paths, top_paths, top_paths] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs, buildInputs sequence_length] return (opDef "CTCBeamSearchDecoder" & opAttr "beam_width" .~ beam_width & opAttr "top_paths" .~ top_paths & op'options & opInputs .~ op'inputs) {- input_arg { name: "inputs" type: DT_FLOAT } input_arg { name: "sequence_length" type: DT_INT32 } output_arg { name: "decoded_indices" type: DT_INT64 number_attr: "top_paths" } output_arg { name: "decoded_values" type: DT_INT64 number_attr: "top_paths" } output_arg { name: "decoded_shape" type: DT_INT64 number_attr: "top_paths" } output_arg { name: "log_probability" type: DT_FLOAT } attr { name: "beam_width" type: "int" has_minimum: true minimum: 1 } attr { name: "top_paths" type: "int" has_minimum: true minimum: 1 } attr { name: "merge_repeated" type: "bool" default_value { b: true } } -} -- | cTCGreedyDecoder :: Tensor v'1 Float -- ^ __inputs__ -> Tensor v'2 Data.Int.Int32 -- ^ __sequence_length__ -> (Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int64, Tensor Build Float) -- ^ (__decoded_indices__, __decoded_values__, __decoded_shape__, __log_probability__) -- -- * __decoded_indices__ -- -- * __decoded_values__ -- -- * __decoded_shape__ -- -- * __log_probability__ cTCGreedyDecoder = cTCGreedyDecoder' id cTCGreedyDecoder' :: OpParams -> Tensor v'1 Float -- ^ __inputs__ -> Tensor v'2 Data.Int.Int32 -- ^ __sequence_length__ -> (Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int64, Tensor Build Float) -- ^ (__decoded_indices__, __decoded_values__, __decoded_shape__, __log_probability__) -- -- * __decoded_indices__ -- -- * __decoded_values__ -- -- * 
__decoded_shape__ -- -- * __log_probability__ cTCGreedyDecoder' op'options inputs sequence_length | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs, buildInputs sequence_length] return (opDef "CTCGreedyDecoder" & op'options & opInputs .~ op'inputs) {- input_arg { name: "inputs" type: DT_FLOAT } input_arg { name: "sequence_length" type: DT_INT32 } output_arg { name: "decoded_indices" type: DT_INT64 } output_arg { name: "decoded_values" type: DT_INT64 } output_arg { name: "decoded_shape" type: DT_INT64 } output_arg { name: "log_probability" type: DT_FLOAT } attr { name: "merge_repeated" type: "bool" default_value { b: false } } -} -- | cTCLoss :: Tensor v'1 Float -- ^ __inputs__ -> Tensor v'2 Data.Int.Int64 -- ^ __labels_indices__ -> Tensor v'3 Data.Int.Int32 -- ^ __labels_values__ -> Tensor v'4 Data.Int.Int32 -- ^ __sequence_length__ -> (Tensor Build Float, Tensor Build Float) -- ^ (__loss__, __gradient__) -- -- * __loss__ -- -- * __gradient__ cTCLoss = cTCLoss' id cTCLoss' :: OpParams -> Tensor v'1 Float -- ^ __inputs__ -> Tensor v'2 Data.Int.Int64 -- ^ __labels_indices__ -> Tensor v'3 Data.Int.Int32 -- ^ __labels_values__ -> Tensor v'4 Data.Int.Int32 -- ^ __sequence_length__ -> (Tensor Build Float, Tensor Build Float) -- ^ (__loss__, __gradient__) -- -- * __loss__ -- -- * __gradient__ cTCLoss' op'options inputs labels_indices labels_values sequence_length | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs, buildInputs labels_indices, buildInputs labels_values, buildInputs sequence_length] return (opDef "CTCLoss" & op'options & opInputs .~ op'inputs) {- input_arg { name: "inputs" type: DT_FLOAT } input_arg { name: "labels_indices" type: DT_INT64 } input_arg { name: "labels_values" type: DT_INT32 } input_arg { name: "sequence_length" type: DT_INT32 } output_arg { name: "loss" type: DT_FLOAT } output_arg { name: "gradient" type: DT_FLOAT } attr { name: "preprocess_collapse_repeated" type: "bool" default_value { b: false } } attr { name: "ctc_merge_repeated" type: "bool" default_value { b: true } } attr { name: "ignore_longer_outputs_than_inputs" type: "bool" default_value { b: false } } -} -- | cacheDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __filename__ -> Tensor Build Variant -- ^ __handle__ cacheDataset = cacheDataset' id cacheDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __filename__ -> Tensor Build Variant -- ^ __handle__ cacheDataset' op'options output_types input_dataset filename | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs filename] return (opDef "CacheDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "filename" type: DT_STRING } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | cast :: forall v'1 srcT dstT . (TensorType srcT, TensorType dstT) => Tensor v'1 srcT -- ^ __x__ -> Tensor Build dstT -- ^ __y__ cast = cast' id cast' :: forall v'1 srcT dstT . 
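-- Example for cast (sketch, same assumed imports as above): the element type
-- to cast to is picked by the result type annotation, which fixes DstT.
--
-- > toFloat :: TF.Session (V.Vector Float)
-- > toFloat = TF.run (cast ints :: Tensor Build Float)
-- >   where ints = TF.constant (Shape [3]) [1, 2, 3 :: Data.Int.Int32]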
(TensorType srcT, TensorType dstT) => OpParams -> Tensor v'1 srcT -- ^ __x__ -> Tensor Build dstT -- ^ __y__ cast' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Cast" & opAttr "SrcT" .~ tensorType (undefined :: srcT) & opAttr "DstT" .~ tensorType (undefined :: dstT) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "SrcT" } output_arg { name: "y" type_attr: "DstT" } attr { name: "SrcT" type: "type" } attr { name: "DstT" type: "type" } attr { name: "Truncate" type: "bool" default_value { b: false } } -} -- | ceil :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ ceil = ceil' id ceil' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ ceil' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Ceil" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | checkNumerics :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __tensor__ -> Tensor Build t -- ^ __output__ checkNumerics = checkNumerics' id checkNumerics' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __tensor__ -> Tensor Build t -- ^ __output__ checkNumerics' op'options tensor | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor] return (opDef "CheckNumerics" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "message" type: "string" } -} -- | cholesky :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ cholesky = cholesky' id cholesky' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ cholesky' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Cholesky" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_HALF type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | choleskyGrad :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __l__ -> Tensor v'2 t -- ^ __grad__ -> Tensor Build t -- ^ __output__ choleskyGrad = choleskyGrad' id choleskyGrad' :: forall v'1 v'2 t . 
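-- Worked example for cholesky (sketch, same assumed imports): for the
-- positive-definite input [[4,2],[2,3]] the op returns the lower-triangular
-- factor L with L * transpose(L) equal to the input, here
-- [[2,0],[1,sqrt 2]] in row-major order:
--
-- > lowerFactor :: TF.Session (V.Vector Float)
-- > lowerFactor = TF.run $
-- >   cholesky (TF.constant (Shape [2, 2]) [4, 2, 2, 3 :: Float])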
(OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __l__ -> Tensor v'2 t -- ^ __grad__ -> Tensor Build t -- ^ __output__ choleskyGrad' op'options l grad | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs l, buildInputs grad] return (opDef "CholeskyGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "l" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | clipByValue :: forall v'1 v'2 v'3 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __t__ -> Tensor v'2 t -- ^ __clip_value_min__ -> Tensor v'3 t -- ^ __clip_value_max__ -> Tensor Build t -- ^ __output__ clipByValue = clipByValue' id clipByValue' :: forall v'1 v'2 v'3 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __t__ -> Tensor v'2 t -- ^ __clip_value_min__ -> Tensor v'3 t -- ^ __clip_value_max__ -> Tensor Build t -- ^ __output__ clipByValue' op'options t clip_value_min clip_value_max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs t, buildInputs clip_value_min, buildInputs clip_value_max] return (opDef "ClipByValue" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "t" type_attr: "T" } input_arg { name: "clip_value_min" type_attr: "T" } input_arg { name: "clip_value_max" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | closeSummaryWriter :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __writer__ -> m' (ControlNode) closeSummaryWriter = closeSummaryWriter' id closeSummaryWriter' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __writer__ -> m' (ControlNode) closeSummaryWriter' op'options writer | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs writer] buildOp [] (opDef "CloseSummaryWriter" & op'options & opInputs .~ op'inputs) {- input_arg { name: "writer" type: DT_RESOURCE } -} -- | collectiveBcastRecv :: forall t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Data.Int.Int64 -- ^ __group_key__ -> Data.Int.Int64 -- ^ __group_size__ -> Data.Int.Int64 -- ^ __instance_key__ -> Shape -- ^ __shape__ -> m' (Tensor Value t) -- ^ __data__ collectiveBcastRecv = collectiveBcastRecv' id collectiveBcastRecv' :: forall t m' . 
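-- Example for clipByValue (sketch, same assumed imports): every element of t
-- is clamped into [clip_value_min, clip_value_max], both given as tensors
-- (typically scalars):
--
-- > clipped :: TF.Session (V.Vector Float)
-- > clipped = TF.run $ clipByValue xs lo hi
-- >   where xs = TF.constant (Shape [4]) [-2, 0, 5, 9 :: Float]
-- >         lo = TF.constant (Shape []) [0]
-- >         hi = TF.constant (Shape []) [6]
-- >         -- fetches [0,0,5,6]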
(MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Data.Int.Int64 -- ^ __group_key__ -> Data.Int.Int64 -- ^ __group_size__ -> Data.Int.Int64 -- ^ __instance_key__ -> Shape -- ^ __shape__ -> m' (Tensor Value t) -- ^ __data__ collectiveBcastRecv' op'options group_key group_size instance_key shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "CollectiveBcastRecv" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "group_key" .~ group_key & opAttr "group_size" .~ group_size & opAttr "instance_key" .~ instance_key & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- output_arg { name: "data" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } attr { name: "group_size" type: "int" } attr { name: "group_key" type: "int" } attr { name: "instance_key" type: "int" } attr { name: "shape" type: "shape" } -} -- | collectiveBcastSend :: forall v'1 t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Data.Int.Int64 -- ^ __group_key__ -> Data.Int.Int64 -- ^ __group_size__ -> Data.Int.Int64 -- ^ __instance_key__ -> Shape -- ^ __shape__ -> Tensor v'1 t -- ^ __input__ -> m' (Tensor Value t) -- ^ __data__ collectiveBcastSend = collectiveBcastSend' id collectiveBcastSend' :: forall v'1 t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Data.Int.Int64 -- ^ __group_key__ -> Data.Int.Int64 -- ^ __group_size__ -> Data.Int.Int64 -- ^ __instance_key__ -> Shape -- ^ __shape__ -> Tensor v'1 t -- ^ __input__ -> m' (Tensor Value t) -- ^ __data__ collectiveBcastSend' op'options group_key group_size instance_key shape input | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "CollectiveBcastSend" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "group_key" .~ group_key & opAttr "group_size" .~ group_size & opAttr "instance_key" .~ instance_key & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "data" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } attr { name: "group_size" type: "int" } attr { name: "group_key" type: "int" } attr { name: "instance_key" type: "int" } attr { name: "shape" type: "shape" } -} -- | collectiveGather :: forall v'1 t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Data.Int.Int64 -- ^ __group_key__ -> Data.Int.Int64 -- ^ __group_size__ -> Data.Int.Int64 -- ^ __instance_key__ -> Shape -- ^ __shape__ -> Tensor v'1 t -- ^ __input__ -> m' (Tensor Value t) -- ^ __data__ collectiveGather = collectiveGather' id collectiveGather' :: forall v'1 t m' . 
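-- Note on the collective* ops above and below: group_key, group_size and
-- instance_key jointly identify one collective instance. Standard TensorFlow
-- collective semantics (not specific to these bindings) require every
-- participant to pass the same values, and the op blocks until group_size
-- members have joined.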
(MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Data.Int.Int64 -- ^ __group_key__ -> Data.Int.Int64 -- ^ __group_size__ -> Data.Int.Int64 -- ^ __instance_key__ -> Shape -- ^ __shape__ -> Tensor v'1 t -- ^ __input__ -> m' (Tensor Value t) -- ^ __data__ collectiveGather' op'options group_key group_size instance_key shape input | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "CollectiveGather" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "group_key" .~ group_key & opAttr "group_size" .~ group_size & opAttr "instance_key" .~ instance_key & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "data" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } attr { name: "group_size" type: "int" } attr { name: "group_key" type: "int" } attr { name: "instance_key" type: "int" } attr { name: "shape" type: "shape" } -} -- | collectivePermute :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __source_target_pairs__ -> Tensor Build t -- ^ __output__ collectivePermute = collectivePermute' id collectivePermute' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __source_target_pairs__ -> Tensor Build t -- ^ __output__ collectivePermute' op'options input source_target_pairs | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs source_target_pairs] return (opDef "CollectivePermute" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "source_target_pairs" type: DT_INT32 } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | collectiveReduce :: forall v'1 t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Data.Int.Int64 -- ^ __group_key__ -> Data.Int.Int64 -- ^ __group_size__ -> Data.Int.Int64 -- ^ __instance_key__ -> Tensor v'1 t -- ^ __input__ -> m' (Tensor Value t) -- ^ __data__ collectiveReduce = collectiveReduce' id collectiveReduce' :: forall v'1 t m' . 
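-- collectiveReduce additionally requires the "merge_op" (Min/Max/Mul/Add),
-- "final_op" (Id/Div) and "subdiv_offsets" attributes listed in the OpDef
-- dump below; with these generated bindings they have to be supplied through
-- the OpParams argument. A sketch, assuming the Attribute instances for
-- ByteString and [Int64] from TensorFlow.Types:
--
-- > reduceParams :: OpParams
-- > reduceParams = (opAttr "merge_op" .~ ("Add" :: ByteString))
-- >              . (opAttr "final_op" .~ ("Id" :: ByteString))
-- >              . (opAttr "subdiv_offsets" .~ ([0] :: [Data.Int.Int64]))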
(MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Data.Int.Int64 -- ^ __group_key__ -> Data.Int.Int64 -- ^ __group_size__ -> Data.Int.Int64 -- ^ __instance_key__ -> Tensor v'1 t -- ^ __input__ -> m' (Tensor Value t) -- ^ __data__ collectiveReduce' op'options group_key group_size instance_key input | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "CollectiveReduce" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "group_key" .~ group_key & opAttr "group_size" .~ group_size & opAttr "instance_key" .~ instance_key & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "data" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } attr { name: "group_size" type: "int" } attr { name: "group_key" type: "int" } attr { name: "instance_key" type: "int" } attr { name: "merge_op" type: "string" allowed_values { list { s: "Min" s: "Max" s: "Mul" s: "Add" } } } attr { name: "final_op" type: "string" allowed_values { list { s: "Id" s: "Div" } } } attr { name: "subdiv_offsets" type: "list(int)" } attr { name: "wait_for" type: "list(int)" default_value { list { } } } -} -- | combinedNonMaxSuppression :: Tensor v'1 Float -- ^ __boxes__ -> Tensor v'2 Float -- ^ __scores__ -> Tensor v'3 Data.Int.Int32 -- ^ __max_output_size_per_class__ -> Tensor v'4 Data.Int.Int32 -- ^ __max_total_size__ -> Tensor v'5 Float -- ^ __iou_threshold__ -> Tensor v'6 Float -- ^ __score_threshold__ -> (Tensor Build Float, Tensor Build Float, Tensor Build Float, Tensor Build Data.Int.Int32) -- ^ (__nmsed_boxes__, __nmsed_scores__, __nmsed_classes__, __valid_detections__) -- -- * __nmsed_boxes__ -- -- * __nmsed_scores__ -- -- * __nmsed_classes__ -- -- * __valid_detections__ combinedNonMaxSuppression = combinedNonMaxSuppression' id combinedNonMaxSuppression' :: OpParams -> Tensor v'1 Float -- ^ __boxes__ -> Tensor v'2 Float -- ^ __scores__ -> Tensor v'3 Data.Int.Int32 -- ^ __max_output_size_per_class__ -> Tensor v'4 Data.Int.Int32 -- ^ __max_total_size__ -> Tensor v'5 Float -- ^ __iou_threshold__ -> Tensor v'6 Float -- ^ __score_threshold__ -> (Tensor Build Float, Tensor Build Float, Tensor Build Float, Tensor Build Data.Int.Int32) -- ^ (__nmsed_boxes__, __nmsed_scores__, __nmsed_classes__, __valid_detections__) -- -- * __nmsed_boxes__ -- -- * __nmsed_scores__ -- -- * __nmsed_classes__ -- -- * __valid_detections__ combinedNonMaxSuppression' op'options boxes scores max_output_size_per_class max_total_size iou_threshold score_threshold | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs boxes, buildInputs scores, buildInputs max_output_size_per_class, buildInputs max_total_size, buildInputs iou_threshold, buildInputs score_threshold] return (opDef "CombinedNonMaxSuppression" & op'options & opInputs .~ op'inputs) {- input_arg { name: "boxes" type: DT_FLOAT } input_arg { name: "scores" type: DT_FLOAT } input_arg { name: "max_output_size_per_class" type: DT_INT32 } input_arg { name: "max_total_size" type: DT_INT32 } input_arg { name: "iou_threshold" type: DT_FLOAT } input_arg { name: "score_threshold" type: DT_FLOAT } output_arg { name: "nmsed_boxes" type: DT_FLOAT } output_arg { name: "nmsed_scores" type: DT_FLOAT } output_arg { name: "nmsed_classes" type: DT_FLOAT } output_arg { name: "valid_detections" type: DT_INT32 
} attr { name: "pad_per_class" type: "bool" default_value { b: false } } attr { name: "clip_boxes" type: "bool" default_value { b: true } } -} -- | compareAndBitpack :: forall v'1 v'2 t . (OneOf '[Bool, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __threshold__ -> Tensor Build Data.Word.Word8 -- ^ __output__ compareAndBitpack = compareAndBitpack' id compareAndBitpack' :: forall v'1 v'2 t . (OneOf '[Bool, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __threshold__ -> Tensor Build Data.Word.Word8 -- ^ __output__ compareAndBitpack' op'options input threshold | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs threshold] return (opDef "CompareAndBitpack" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "threshold" type_attr: "T" } output_arg { name: "output" type: DT_UINT8 } attr { name: "T" type: "type" allowed_values { list { type: DT_BOOL type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 } } } -} -- | complex :: forall v'1 v'2 t tout . (OneOf '[Double, Float] t, OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] tout) => Tensor v'1 t -- ^ __real__ -> Tensor v'2 t -- ^ __imag__ -> Tensor Build tout -- ^ __out__ complex = complex' id complex' :: forall v'1 v'2 t tout . (OneOf '[Double, Float] t, OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] tout) => OpParams -> Tensor v'1 t -- ^ __real__ -> Tensor v'2 t -- ^ __imag__ -> Tensor Build tout -- ^ __out__ complex' op'options real imag | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs real, buildInputs imag] return (opDef "Complex" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tout" .~ tensorType (undefined :: tout) & op'options & opInputs .~ op'inputs) {- input_arg { name: "real" type_attr: "T" } input_arg { name: "imag" type_attr: "T" } output_arg { name: "out" type_attr: "Tout" } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "Tout" type: "type" default_value { type: DT_COMPLEX64 } allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | complexAbs :: forall v'1 t tout . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] t, OneOf '[Double, Float] tout) => Tensor v'1 t -- ^ __x__ -> Tensor Build tout -- ^ __y__ complexAbs = complexAbs' id complexAbs' :: forall v'1 t tout . 
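-- Worked example for complex / complexAbs (sketch, same assumed imports):
-- |3 + 4i| = 5.
--
-- > magnitude :: TF.Session (V.Vector Float)
-- > magnitude = TF.run (complexAbs z :: Tensor Build Float)
-- >   where z :: Tensor Build (Data.Complex.Complex Float)
-- >         z = complex (TF.constant (Shape []) [3 :: Float])
-- >                     (TF.constant (Shape []) [4])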
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] t, OneOf '[Double, Float] tout) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build tout -- ^ __y__ complexAbs' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "ComplexAbs" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tout" .~ tensorType (undefined :: tout) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "Tout" } attr { name: "T" type: "type" default_value { type: DT_COMPLEX64 } allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } } } attr { name: "Tout" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | computeAccidentalHits :: Data.Int.Int64 -- ^ __num_true__ -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__ -> Tensor v'2 Data.Int.Int64 -- ^ __sampled_candidates__ -> (Tensor Build Data.Int.Int32, Tensor Build Data.Int.Int64, Tensor Build Float) -- ^ (__indices__, __ids__, __weights__) -- -- * __indices__ -- -- * __ids__ -- -- * __weights__ computeAccidentalHits = computeAccidentalHits' id computeAccidentalHits' :: OpParams -> Data.Int.Int64 -- ^ __num_true__ -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__ -> Tensor v'2 Data.Int.Int64 -- ^ __sampled_candidates__ -> (Tensor Build Data.Int.Int32, Tensor Build Data.Int.Int64, Tensor Build Float) -- ^ (__indices__, __ids__, __weights__) -- -- * __indices__ -- -- * __ids__ -- -- * __weights__ computeAccidentalHits' op'options num_true true_classes sampled_candidates | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs true_classes, buildInputs sampled_candidates] return (opDef "ComputeAccidentalHits" & opAttr "num_true" .~ num_true & op'options & opInputs .~ op'inputs) {- input_arg { name: "true_classes" type: DT_INT64 } input_arg { name: "sampled_candidates" type: DT_INT64 } output_arg { name: "indices" type: DT_INT32 } output_arg { name: "ids" type: DT_INT64 } output_arg { name: "weights" type: DT_FLOAT } attr { name: "num_true" type: "int" } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } -} -- | concat :: forall v'1 v'2 t . (TensorType t) => Tensor v'1 Data.Int.Int32 -- ^ __concat_dim__ -> [Tensor v'2 t] -- ^ __values__ -> Tensor Build t -- ^ __output__ concat = concat' id concat' :: forall v'1 v'2 t . 
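-- Example for concat (sketch, same assumed imports): stacking two 2x3
-- tensors along dimension 0 yields a 4x3 tensor. Per the OpDef, N has
-- minimum 2, so the input list needs at least two tensors:
--
-- > stacked :: TF.Session (V.Vector Float)
-- > stacked = TF.run $ concat dim0 [a, b]
-- >   where dim0 = TF.constant (Shape []) [0 :: Data.Int.Int32]
-- >         a = TF.constant (Shape [2, 3]) [1..6 :: Float]
-- >         b = TF.constant (Shape [2, 3]) [7..12]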
(TensorType t) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __concat_dim__ -> [Tensor v'2 t] -- ^ __values__ -> Tensor Build t -- ^ __output__ concat' op'options concat_dim values | eqLengthGuard [("N", [("values", length values)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs concat_dim, buildInputs values] return (opDef "Concat" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length values) :: Int64 {- input_arg { name: "concat_dim" type: DT_INT32 } input_arg { name: "values" type_attr: "T" number_attr: "N" } output_arg { name: "output" type_attr: "T" } attr { name: "N" type: "int" has_minimum: true minimum: 2 } attr { name: "T" type: "type" } -} -- | concatOffset :: Tensor v'1 Data.Int.Int32 -- ^ __concat_dim__ -> [Tensor v'2 Data.Int.Int32] -- ^ __shape__ -> [Tensor Build Data.Int.Int32] -- ^ __offset__ concatOffset = concatOffset' id concatOffset' :: OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __concat_dim__ -> [Tensor v'2 Data.Int.Int32] -- ^ __shape__ -> [Tensor Build Data.Int.Int32] -- ^ __offset__ concatOffset' op'options concat_dim shape | eqLengthGuard [("N", [("shape", length shape)])] = pureOp [n] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs concat_dim, buildInputs shape] return (opDef "ConcatOffset" & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length shape) :: Int64 {- input_arg { name: "concat_dim" type: DT_INT32 } input_arg { name: "shape" type: DT_INT32 number_attr: "N" } output_arg { name: "offset" type: DT_INT32 number_attr: "N" } attr { name: "N" type: "int" has_minimum: true minimum: 2 } -} -- | concatV2 :: forall v'1 v'2 t tidx . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => [Tensor v'1 t] -- ^ __values__ -> Tensor v'2 tidx -- ^ __axis__ -> Tensor Build t -- ^ __output__ concatV2 = concatV2' id concatV2' :: forall v'1 v'2 t tidx . 
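-- Note: Concat (above) takes concat_dim as its first input, whereas ConcatV2
-- takes the axis as its last input; ConcatV2 is the form TensorFlow itself
-- prefers. concatOffset, given concat_dim and the pre-concatenation shapes,
-- returns each input's starting offset within the concatenated output.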
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> [Tensor v'1 t] -- ^ __values__ -> Tensor v'2 tidx -- ^ __axis__ -> Tensor Build t -- ^ __output__ concatV2' op'options values axis | eqLengthGuard [("N", [("values", length values)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs values, buildInputs axis] return (opDef "ConcatV2" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length values) :: Int64 {- input_arg { name: "values" type_attr: "T" number_attr: "N" } input_arg { name: "axis" type_attr: "Tidx" } output_arg { name: "output" type_attr: "T" } attr { name: "N" type: "int" has_minimum: true minimum: 2 } attr { name: "T" type: "type" } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | concatenateDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Variant -- ^ __another_dataset__ -> Tensor Build Variant -- ^ __handle__ concatenateDataset = concatenateDataset' id concatenateDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Variant -- ^ __another_dataset__ -> Tensor Build Variant -- ^ __handle__ concatenateDataset' op'options output_types input_dataset another_dataset | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs another_dataset] return (opDef "ConcatenateDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "another_dataset" type: DT_VARIANT } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | conditionalAccumulator :: forall m' . (MonadBuild m') => DataType -- ^ __dtype__ -> Shape -- ^ __shape__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__ conditionalAccumulator = conditionalAccumulator' id conditionalAccumulator' :: forall m' . (MonadBuild m') => OpParams -> DataType -- ^ __dtype__ -> Shape -- ^ __shape__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__ conditionalAccumulator' op'options dtype shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "ConditionalAccumulator" & opAttr "dtype" .~ dtype & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" type: DT_STRING is_ref: true } attr { name: "dtype" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "shape" type: "shape" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } attr { name: "reduction_type" type: "string" default_value { s: "MEAN" } allowed_values { list { s: "MEAN" s: "SUM" } } } -} -- | configureDistributedTPU :: forall m' . 
(MonadBuild m') => m' (Tensor Value Data.ByteString.ByteString) -- ^ __topology__ configureDistributedTPU = configureDistributedTPU' id configureDistributedTPU' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __topology__ configureDistributedTPU' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "ConfigureDistributedTPU" & op'options & opInputs .~ op'inputs) {- output_arg { name: "topology" type: DT_STRING } attr { name: "embedding_config" type: "string" default_value { s: "" } } attr { name: "tpu_embedding_config" type: "string" default_value { s: "" } } attr { name: "is_global_init" type: "bool" default_value { b: false } } -} -- | conj :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Variant] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ conj = conj' id conj' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Variant] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ conj' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Conj" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_COMPLEX64 } allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_VARIANT } } } -} -- | conjugateTranspose :: forall v'1 v'2 t tperm . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tperm) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 tperm -- ^ __perm__ -> Tensor Build t -- ^ __y__ conjugateTranspose = conjugateTranspose' id conjugateTranspose' :: forall v'1 v'2 t tperm . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tperm) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 tperm -- ^ __perm__ -> Tensor Build t -- ^ __y__ conjugateTranspose' op'options x perm | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs perm] return (opDef "ConjugateTranspose" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tperm" .~ tensorType (undefined :: tperm) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "perm" type_attr: "Tperm" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tperm" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | const :: forall dtype . (TensorType dtype) => Tensor Build dtype -- ^ __output__ const = const' id const' :: forall dtype . (TensorType dtype) => OpParams -> Tensor Build dtype -- ^ __output__ const' op'options | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] return (opDef "Const" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- output_arg { name: "output" type_attr: "dtype" } attr { name: "value" type: "tensor" } attr { name: "dtype" type: "type" } -} -- | consumeMutexLock :: forall v'1 m' . (MonadBuild m') => Tensor v'1 Variant -- ^ __mutex_lock__ -> m' (ControlNode) consumeMutexLock = consumeMutexLock' id consumeMutexLock' :: forall v'1 m' . 
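-- Note on const above: the OpDef's required "value" tensor attribute is not
-- part of the generated signature, so it would have to be injected through
-- OpParams; in practice the TensorFlow.Ops.constant wrapper from the
-- tensorflow-ops package is the convenient way to build constant tensors.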
(MonadBuild m') => OpParams -> Tensor v'1 Variant -- ^ __mutex_lock__ -> m' (ControlNode) consumeMutexLock' op'options mutex_lock | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs mutex_lock] buildOp [] (opDef "ConsumeMutexLock" & op'options & opInputs .~ op'inputs) {- input_arg { name: "mutex_lock" type: DT_VARIANT } -} -- | controlTrigger :: forall m' . (MonadBuild m') => m' (ControlNode) controlTrigger = controlTrigger' id controlTrigger' :: forall m' . (MonadBuild m') => OpParams -> m' (ControlNode) controlTrigger' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "ControlTrigger" & op'options & opInputs .~ op'inputs) {- -} -- | conv2D :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __filter__ -> Tensor Build t -- ^ __output__ conv2D = conv2D' id conv2D' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __filter__ -> Tensor Build t -- ^ __output__ conv2D' op'options input filter | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter] return (opDef "Conv2D" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "filter" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "strides" type: "list(int)" } attr { name: "use_cudnn_on_gpu" type: "bool" default_value { b: true } } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" s: "EXPLICIT" } } } attr { name: "explicit_paddings" type: "list(int)" default_value { list { } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 } } } -} -- | conv2DBackpropFilter :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __filter_sizes__ -> Tensor v'3 t -- ^ __out_backprop__ -> Tensor Build t -- ^ __output__ conv2DBackpropFilter = conv2DBackpropFilter' id conv2DBackpropFilter' :: forall v'1 v'2 v'3 t . 
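-- Example for conv2D (sketch, assuming the Attribute instances for
-- ByteString and [Int64] from TensorFlow.Types): "strides" and "padding"
-- have no defaults in the OpDef, so they must be supplied via OpParams:
--
-- > convolved :: Tensor Build Float -> Tensor Build Float -> Tensor Build Float
-- > convolved input filter =
-- >   conv2D' ( (opAttr "strides" .~ ([1, 1, 1, 1] :: [Data.Int.Int64]))
-- >           . (opAttr "padding" .~ ("SAME" :: ByteString)) )
-- >           input filter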
(OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __filter_sizes__ -> Tensor v'3 t -- ^ __out_backprop__ -> Tensor Build t -- ^ __output__ conv2DBackpropFilter' op'options input filter_sizes out_backprop | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter_sizes, buildInputs out_backprop] return (opDef "Conv2DBackpropFilter" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "filter_sizes" type: DT_INT32 } input_arg { name: "out_backprop" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "strides" type: "list(int)" } attr { name: "use_cudnn_on_gpu" type: "bool" default_value { b: true } } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" s: "EXPLICIT" } } } attr { name: "explicit_paddings" type: "list(int)" default_value { list { } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 } } } -} -- | conv2DBackpropInput :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 Data.Int.Int32 -- ^ __input_sizes__ -> Tensor v'2 t -- ^ __filter__ -> Tensor v'3 t -- ^ __out_backprop__ -> Tensor Build t -- ^ __output__ conv2DBackpropInput = conv2DBackpropInput' id conv2DBackpropInput' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __input_sizes__ -> Tensor v'2 t -- ^ __filter__ -> Tensor v'3 t -- ^ __out_backprop__ -> Tensor Build t -- ^ __output__ conv2DBackpropInput' op'options input_sizes filter out_backprop | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_sizes, buildInputs filter, buildInputs out_backprop] return (opDef "Conv2DBackpropInput" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_sizes" type: DT_INT32 } input_arg { name: "filter" type_attr: "T" } input_arg { name: "out_backprop" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "strides" type: "list(int)" } attr { name: "use_cudnn_on_gpu" type: "bool" default_value { b: true } } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" s: "EXPLICIT" } } } attr { name: "explicit_paddings" type: "list(int)" default_value { list { } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 } } } -} -- | conv3D :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __filter__ -> Tensor Build t -- ^ __output__ conv3D = conv3D' id conv3D' :: forall v'1 v'2 t . 
(OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __filter__ -> Tensor Build t -- ^ __output__ conv3D' op'options input filter | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter] return (opDef "Conv3D" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "filter" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "strides" type: "list(int)" has_minimum: true minimum: 5 } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NDHWC" } allowed_values { list { s: "NDHWC" s: "NCDHW" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 i: 1 } } } -} -- | conv3DBackpropFilter :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __filter__ -> Tensor v'3 t -- ^ __out_backprop__ -> Tensor Build t -- ^ __output__ conv3DBackpropFilter = conv3DBackpropFilter' id conv3DBackpropFilter' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __filter__ -> Tensor v'3 t -- ^ __out_backprop__ -> Tensor Build t -- ^ __output__ conv3DBackpropFilter' op'options input filter out_backprop | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs out_backprop] return (opDef "Conv3DBackpropFilter" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "filter" type_attr: "T" } input_arg { name: "out_backprop" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "strides" type: "list(int)" has_minimum: true minimum: 5 } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 i: 1 } } } -} -- | conv3DBackpropFilterV2 :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __filter_sizes__ -> Tensor v'3 t -- ^ __out_backprop__ -> Tensor Build t -- ^ __output__ conv3DBackpropFilterV2 = conv3DBackpropFilterV2' id conv3DBackpropFilterV2' :: forall v'1 v'2 v'3 t . 
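-- Note on the conv3DBackprop* family: the original ops take the actual
-- input/filter tensors, whereas the V2 variants take their sizes as an
-- integer tensor (filter_sizes / input_sizes), mirroring Conv2DBackprop*.
-- As the minimum: 5 constraints in the dumps show, the 3-D "strides" (and
-- "dilations") lists have five elements, matching the NDHWC layout.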
(OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __filter_sizes__ -> Tensor v'3 t -- ^ __out_backprop__ -> Tensor Build t -- ^ __output__ conv3DBackpropFilterV2' op'options input filter_sizes out_backprop | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter_sizes, buildInputs out_backprop] return (opDef "Conv3DBackpropFilterV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "filter_sizes" type: DT_INT32 } input_arg { name: "out_backprop" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "strides" type: "list(int)" has_minimum: true minimum: 5 } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NDHWC" } allowed_values { list { s: "NDHWC" s: "NCDHW" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 i: 1 } } } -} -- | conv3DBackpropInput :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __filter__ -> Tensor v'3 t -- ^ __out_backprop__ -> Tensor Build t -- ^ __output__ conv3DBackpropInput = conv3DBackpropInput' id conv3DBackpropInput' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __filter__ -> Tensor v'3 t -- ^ __out_backprop__ -> Tensor Build t -- ^ __output__ conv3DBackpropInput' op'options input filter out_backprop | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs out_backprop] return (opDef "Conv3DBackpropInput" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "filter" type_attr: "T" } input_arg { name: "out_backprop" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "strides" type: "list(int)" has_minimum: true minimum: 5 } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 i: 1 } } } -} -- | conv3DBackpropInputV2 :: forall v'1 v'2 v'3 t tshape . (OneOf '[Data.Word.Word16, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tshape) => Tensor v'1 tshape -- ^ __input_sizes__ -> Tensor v'2 t -- ^ __filter__ -> Tensor v'3 t -- ^ __out_backprop__ -> Tensor Build t -- ^ __output__ conv3DBackpropInputV2 = conv3DBackpropInputV2' id conv3DBackpropInputV2' :: forall v'1 v'2 v'3 t tshape . 
(OneOf '[Data.Word.Word16, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tshape) => OpParams -> Tensor v'1 tshape -- ^ __input_sizes__ -> Tensor v'2 t -- ^ __filter__ -> Tensor v'3 t -- ^ __out_backprop__ -> Tensor Build t -- ^ __output__ conv3DBackpropInputV2' op'options input_sizes filter out_backprop | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_sizes, buildInputs filter, buildInputs out_backprop] return (opDef "Conv3DBackpropInputV2" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tshape" .~ tensorType (undefined :: tshape) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_sizes" type_attr: "Tshape" } input_arg { name: "filter" type_attr: "T" } input_arg { name: "out_backprop" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "strides" type: "list(int)" has_minimum: true minimum: 5 } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NDHWC" } allowed_values { list { s: "NDHWC" s: "NCDHW" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 i: 1 } } } attr { name: "Tshape" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Copy Op. -- -- Performs CPU-to-CPU or GPU-to-GPU deep-copying of a tensor, depending on the -- device on which the tensor is allocated. -- N.B.: If all the downstream attached debug ops are disabled given the current -- gRPC gating status, the output will simply forward the input tensor without -- deep-copying. See the documentation of Debug* ops for more details. -- -- Unlike the CopyHost Op, this op does not have a HostMemory constraint on its -- input or output. copy :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __input__: Input tensor. -> Tensor Build t -- ^ __output__: Output tensor, deep-copied from input. copy = copy' id copy' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__: Input tensor. -> Tensor Build t -- ^ __output__: Output tensor, deep-copied from input. copy' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Copy" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "Input tensor." type_attr: "T" } output_arg { name: "output" description: "Output tensor, deep-copied from input." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "tensor_name" type: "string" default_value { s: "" } description: "The name of the input tensor." } attr { name: "debug_ops_spec" type: "list(string)" default_value { list { } } description: "A list of debug op spec (op, url, gated_grpc) for attached debug\nops. Each element of the list has the format\n;;, wherein gated_grpc is boolean represented\nas 0/1. E.g., \"DebugIdentity;grpc://foo:3333;1\",\n\"DebugIdentity;file:///tmp/tfdbg_1;0\"." } -} -- | Copy Host Op. -- -- Performs CPU-to-CPU deep-copying of a tensor. -- N.B.: If all the downstream attached debug ops are disabled given the current -- gRPC gating status, the output will simply forward the input tensor without -- deep-copying. See the documentation of Debug* ops for more details. 
-- -- Unlike the Copy Op, this op has a HostMemory constraint on its input or output. copyHost :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __input__: Input tensor. -> Tensor Build t -- ^ __output__: Output tensor, deep-copied from input. copyHost = copyHost' id copyHost' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__: Input tensor. -> Tensor Build t -- ^ __output__: Output tensor, deep-copied from input. copyHost' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "CopyHost" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "Input tensor." type_attr: "T" } output_arg { name: "output" description: "Output tensor, deep-copied from input." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "tensor_name" type: "string" default_value { s: "" } description: "The name of the input tensor." } attr { name: "debug_ops_spec" type: "list(string)" default_value { list { } } description: "A list of debug op spec (op, url, gated_grpc) for attached debug\nops. Each element of the list has the format\n;;, wherein gated_grpc is boolean represented\nas 0/1. E.g., \"DebugIdentity;grpc://foo:3333;1\",\n\"DebugIdentity;file:///tmp/tfdbg_1;0\"." } -} -- | cos :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ cos = cos' id cos' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ cos' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Cos" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | cosh :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ cosh = cosh' id cosh' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ cosh' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Cosh" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | countUpTo :: forall t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Data.Int.Int64 -- ^ __limit__ -> Tensor Ref t -- ^ __ref__ -> m' (Tensor Value t) -- ^ __output__ countUpTo = countUpTo' id countUpTo' :: forall t m' . 
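-- countUpTo is stateful (hence the MonadBuild context): each run increments
-- __ref__ and yields the value it held before the increment, failing with an
-- OutOfRange error once __limit__ is reached. A minimal sketch, assuming
-- `initializedVariable` and `scalar` from TensorFlow.Ops (outside this
-- module):
--
-- > counter <- initializedVariable (scalar (0 :: Int32))
-- > next    <- countUpTo 100 counter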
(MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Data.Int.Int64 -- ^ __limit__ -> Tensor Ref t -- ^ __ref__ -> m' (Tensor Value t) -- ^ __output__ countUpTo' op'options limit ref | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref] buildOp [] (opDef "CountUpTo" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "limit" .~ limit & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" type_attr: "T" is_ref: true } output_arg { name: "output" type_attr: "T" } attr { name: "limit" type: "int" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | createSummaryDbWriter :: forall v'1 v'2 v'3 v'4 v'5 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __writer__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __db_uri__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __experiment_name__ -> Tensor v'4 Data.ByteString.ByteString -- ^ __run_name__ -> Tensor v'5 Data.ByteString.ByteString -- ^ __user_name__ -> m' (ControlNode) createSummaryDbWriter = createSummaryDbWriter' id createSummaryDbWriter' :: forall v'1 v'2 v'3 v'4 v'5 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __writer__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __db_uri__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __experiment_name__ -> Tensor v'4 Data.ByteString.ByteString -- ^ __run_name__ -> Tensor v'5 Data.ByteString.ByteString -- ^ __user_name__ -> m' (ControlNode) createSummaryDbWriter' op'options writer db_uri experiment_name run_name user_name | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs writer, buildInputs db_uri, buildInputs experiment_name, buildInputs run_name, buildInputs user_name] buildOp [] (opDef "CreateSummaryDbWriter" & op'options & opInputs .~ op'inputs) {- input_arg { name: "writer" type: DT_RESOURCE } input_arg { name: "db_uri" type: DT_STRING } input_arg { name: "experiment_name" type: DT_STRING } input_arg { name: "run_name" type: DT_STRING } input_arg { name: "user_name" type: DT_STRING } -} -- | createSummaryFileWriter :: forall v'1 v'2 v'3 v'4 v'5 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __writer__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __logdir__ -> Tensor v'3 Data.Int.Int32 -- ^ __max_queue__ -> Tensor v'4 Data.Int.Int32 -- ^ __flush_millis__ -> Tensor v'5 Data.ByteString.ByteString -- ^ __filename_suffix__ -> m' (ControlNode) createSummaryFileWriter = createSummaryFileWriter' id createSummaryFileWriter' :: forall v'1 v'2 v'3 v'4 v'5 m' . 
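-- Every argument of createSummaryFileWriter is a tensor, so plain values
-- must be lifted first. A minimal sketch, assuming `scalar` from
-- TensorFlow.Ops and a __writer__ resource handle obtained elsewhere:
--
-- > createSummaryFileWriter writer (scalar "/tmp/logdir")
-- >                         (scalar (10 :: Int32))
-- >                         (scalar (1000 :: Int32))
-- >                         (scalar ".summaries")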
(MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __writer__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __logdir__ -> Tensor v'3 Data.Int.Int32 -- ^ __max_queue__ -> Tensor v'4 Data.Int.Int32 -- ^ __flush_millis__ -> Tensor v'5 Data.ByteString.ByteString -- ^ __filename_suffix__ -> m' (ControlNode) createSummaryFileWriter' op'options writer logdir max_queue flush_millis filename_suffix | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs writer, buildInputs logdir, buildInputs max_queue, buildInputs flush_millis, buildInputs filename_suffix] buildOp [] (opDef "CreateSummaryFileWriter" & op'options & opInputs .~ op'inputs) {- input_arg { name: "writer" type: DT_RESOURCE } input_arg { name: "logdir" type: DT_STRING } input_arg { name: "max_queue" type: DT_INT32 } input_arg { name: "flush_millis" type: DT_INT32 } input_arg { name: "filename_suffix" type: DT_STRING } -} -- | cropAndResize :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __image__ -> Tensor v'2 Float -- ^ __boxes__ -> Tensor v'3 Data.Int.Int32 -- ^ __box_ind__ -> Tensor v'4 Data.Int.Int32 -- ^ __crop_size__ -> Tensor Build Float -- ^ __crops__ cropAndResize = cropAndResize' id cropAndResize' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __image__ -> Tensor v'2 Float -- ^ __boxes__ -> Tensor v'3 Data.Int.Int32 -- ^ __box_ind__ -> Tensor v'4 Data.Int.Int32 -- ^ __crop_size__ -> Tensor Build Float -- ^ __crops__ cropAndResize' op'options image boxes box_ind crop_size | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs image, buildInputs boxes, buildInputs box_ind, buildInputs crop_size] return (opDef "CropAndResize" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "image" type_attr: "T" } input_arg { name: "boxes" type: DT_FLOAT } input_arg { name: "box_ind" type: DT_INT32 } input_arg { name: "crop_size" type: DT_INT32 } output_arg { name: "crops" type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_UINT16 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "method" type: "string" default_value { s: "bilinear" } allowed_values { list { s: "bilinear" s: "nearest" } } } attr { name: "extrapolation_value" type: "float" default_value { f: 0.0 } } -} -- | cropAndResizeGradBoxes :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 Float -- ^ __grads__ -> Tensor v'2 t -- ^ __image__ -> Tensor v'3 Float -- ^ __boxes__ -> Tensor v'4 Data.Int.Int32 -- ^ __box_ind__ -> Tensor Build Float -- ^ __output__ cropAndResizeGradBoxes = cropAndResizeGradBoxes' id cropAndResizeGradBoxes' :: forall v'1 v'2 v'3 v'4 t . 
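-- For cropAndResize, __boxes__ holds normalised [y1, x1, y2, x2] box
-- coordinates, __box_ind__ assigns each box to an image in the batch, and
-- the "method" attr defaults to "bilinear". A sketch that overrides the
-- extrapolation value used for out-of-range coordinates (names
-- hypothetical):
--
-- > crops = cropAndResize' (opAttr "extrapolation_value" .~ (0.5 :: Float))
-- >                        images boxes boxInd cropSize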
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Float -- ^ __grads__ -> Tensor v'2 t -- ^ __image__ -> Tensor v'3 Float -- ^ __boxes__ -> Tensor v'4 Data.Int.Int32 -- ^ __box_ind__ -> Tensor Build Float -- ^ __output__ cropAndResizeGradBoxes' op'options grads image boxes box_ind | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs grads, buildInputs image, buildInputs boxes, buildInputs box_ind] return (opDef "CropAndResizeGradBoxes" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "grads" type: DT_FLOAT } input_arg { name: "image" type_attr: "T" } input_arg { name: "boxes" type: DT_FLOAT } input_arg { name: "box_ind" type: DT_INT32 } output_arg { name: "output" type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_UINT16 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "method" type: "string" default_value { s: "bilinear" } allowed_values { list { s: "bilinear" } } } -} -- | cropAndResizeGradImage :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 Float -- ^ __grads__ -> Tensor v'2 Float -- ^ __boxes__ -> Tensor v'3 Data.Int.Int32 -- ^ __box_ind__ -> Tensor v'4 Data.Int.Int32 -- ^ __image_size__ -> Tensor Build t -- ^ __output__ cropAndResizeGradImage = cropAndResizeGradImage' id cropAndResizeGradImage' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 Float -- ^ __grads__ -> Tensor v'2 Float -- ^ __boxes__ -> Tensor v'3 Data.Int.Int32 -- ^ __box_ind__ -> Tensor v'4 Data.Int.Int32 -- ^ __image_size__ -> Tensor Build t -- ^ __output__ cropAndResizeGradImage' op'options grads boxes box_ind image_size | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs grads, buildInputs boxes, buildInputs box_ind, buildInputs image_size] return (opDef "CropAndResizeGradImage" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "grads" type: DT_FLOAT } input_arg { name: "boxes" type: DT_FLOAT } input_arg { name: "box_ind" type: DT_INT32 } input_arg { name: "image_size" type: DT_INT32 } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE } } } attr { name: "method" type: "string" default_value { s: "bilinear" } allowed_values { list { s: "bilinear" s: "nearest" } } } -} -- | cross :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __b__ -> Tensor Build t -- ^ __product__ cross = cross' id cross' :: forall v'1 v'2 t . 
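-- cross computes pairwise 3-D cross products, so the innermost dimension of
-- __a__ and __b__ must have size 3; e.g. a = [1, 0, 0] and b = [0, 1, 0]
-- yield product = [0, 0, 1]. A sketch, assuming `vector` from
-- TensorFlow.Ops (outside this module):
--
-- > p = cross (vector [1, 0, 0 :: Float]) (vector [0, 1, 0])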
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __b__ -> Tensor Build t -- ^ __product__ cross' op'options a b | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a, buildInputs b] return (opDef "Cross" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a" type_attr: "T" } input_arg { name: "b" type_attr: "T" } output_arg { name: "product" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | crossReplicaSum :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Word.Word16, Data.Word.Word32, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __group_assignment__ -> Tensor Build t -- ^ __output__ crossReplicaSum = crossReplicaSum' id crossReplicaSum' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Word.Word16, Data.Word.Word32, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __group_assignment__ -> Tensor Build t -- ^ __output__ crossReplicaSum' op'options input group_assignment | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs group_assignment] return (opDef "CrossReplicaSum" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "group_assignment" type: DT_INT32 } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_FLOAT type: DT_INT32 type: DT_UINT32 } } } -} -- | cudnnRNN :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __input_h__ -> Tensor v'3 t -- ^ __input_c__ -> Tensor v'4 t -- ^ __params__ -> m' ((Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t)) -- ^ (__output__, __output_h__, __output_c__, __reserve_space__) -- -- * __output__ -- -- * __output_h__ -- -- * __output_c__ -- -- * __reserve_space__ cudnnRNN = cudnnRNN' id cudnnRNN' :: forall v'1 v'2 v'3 v'4 t m' . 
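-- cudnnRNN's behaviour is selected entirely through attrs ("rnn_mode"
-- defaults to "lstm"), which the generated wrapper exposes only via
-- OpParams. A sketch selecting a GRU with dropout (input tensors
-- hypothetical):
--
-- > (out, outH, outC, reserve) <-
-- >   cudnnRNN' ( (opAttr "rnn_mode" .~ ("gru" :: ByteString))
-- >             . (opAttr "dropout" .~ (0.2 :: Float)) )
-- >             input inputH inputC params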
(MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __input_h__ -> Tensor v'3 t -- ^ __input_c__ -> Tensor v'4 t -- ^ __params__ -> m' ((Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t)) -- ^ (__output__, __output_h__, __output_c__, __reserve_space__) -- -- * __output__ -- -- * __output_h__ -- -- * __output_c__ -- -- * __reserve_space__ cudnnRNN' op'options input input_h input_c params | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs input_h, buildInputs input_c, buildInputs params] buildOp [] (opDef "CudnnRNN" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "input_h" type_attr: "T" } input_arg { name: "input_c" type_attr: "T" } input_arg { name: "params" type_attr: "T" } output_arg { name: "output" type_attr: "T" } output_arg { name: "output_h" type_attr: "T" } output_arg { name: "output_c" type_attr: "T" } output_arg { name: "reserve_space" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "rnn_mode" type: "string" default_value { s: "lstm" } allowed_values { list { s: "rnn_relu" s: "rnn_tanh" s: "lstm" s: "gru" } } } attr { name: "input_mode" type: "string" default_value { s: "linear_input" } allowed_values { list { s: "linear_input" s: "skip_input" s: "auto_select" } } } attr { name: "direction" type: "string" default_value { s: "unidirectional" } allowed_values { list { s: "unidirectional" s: "bidirectional" } } } attr { name: "dropout" type: "float" default_value { f: 0.0 } } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } attr { name: "is_training" type: "bool" default_value { b: true } } -} -- | cudnnRNNBackprop :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 v'11 t m' . (MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __input_h__ -> Tensor v'3 t -- ^ __input_c__ -> Tensor v'4 t -- ^ __params__ -> Tensor v'5 t -- ^ __output__ -> Tensor v'6 t -- ^ __output_h__ -> Tensor v'7 t -- ^ __output_c__ -> Tensor v'8 t -- ^ __output_backprop__ -> Tensor v'9 t -- ^ __output_h_backprop__ -> Tensor v'10 t -- ^ __output_c_backprop__ -> Tensor v'11 t -- ^ __reserve_space__ -> m' ((Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t)) -- ^ (__input_backprop__, __input_h_backprop__, __input_c_backprop__, __params_backprop__) -- -- * __input_backprop__ -- -- * __input_h_backprop__ -- -- * __input_c_backprop__ -- -- * __params_backprop__ cudnnRNNBackprop = cudnnRNNBackprop' id cudnnRNNBackprop' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 v'11 t m' . 
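-- cudnnRNNBackprop must be configured with the same "rnn_mode",
-- "input_mode", "direction", dropout and seed attrs as the forward cudnnRNN
-- call that produced __reserve_space__; with mismatched attrs the reserved
-- workspace does not describe the gradient computation being requested.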
(MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __input_h__ -> Tensor v'3 t -- ^ __input_c__ -> Tensor v'4 t -- ^ __params__ -> Tensor v'5 t -- ^ __output__ -> Tensor v'6 t -- ^ __output_h__ -> Tensor v'7 t -- ^ __output_c__ -> Tensor v'8 t -- ^ __output_backprop__ -> Tensor v'9 t -- ^ __output_h_backprop__ -> Tensor v'10 t -- ^ __output_c_backprop__ -> Tensor v'11 t -- ^ __reserve_space__ -> m' ((Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t)) -- ^ (__input_backprop__, __input_h_backprop__, __input_c_backprop__, __params_backprop__) -- -- * __input_backprop__ -- -- * __input_h_backprop__ -- -- * __input_c_backprop__ -- -- * __params_backprop__ cudnnRNNBackprop' op'options input input_h input_c params output output_h output_c output_backprop output_h_backprop output_c_backprop reserve_space | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs input_h, buildInputs input_c, buildInputs params, buildInputs output, buildInputs output_h, buildInputs output_c, buildInputs output_backprop, buildInputs output_h_backprop, buildInputs output_c_backprop, buildInputs reserve_space] buildOp [] (opDef "CudnnRNNBackprop" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "input_h" type_attr: "T" } input_arg { name: "input_c" type_attr: "T" } input_arg { name: "params" type_attr: "T" } input_arg { name: "output" type_attr: "T" } input_arg { name: "output_h" type_attr: "T" } input_arg { name: "output_c" type_attr: "T" } input_arg { name: "output_backprop" type_attr: "T" } input_arg { name: "output_h_backprop" type_attr: "T" } input_arg { name: "output_c_backprop" type_attr: "T" } input_arg { name: "reserve_space" type_attr: "T" } output_arg { name: "input_backprop" type_attr: "T" } output_arg { name: "input_h_backprop" type_attr: "T" } output_arg { name: "input_c_backprop" type_attr: "T" } output_arg { name: "params_backprop" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "rnn_mode" type: "string" default_value { s: "lstm" } allowed_values { list { s: "rnn_relu" s: "rnn_tanh" s: "lstm" s: "gru" } } } attr { name: "input_mode" type: "string" default_value { s: "linear_input" } allowed_values { list { s: "linear_input" s: "skip_input" s: "auto_select" } } } attr { name: "direction" type: "string" default_value { s: "unidirectional" } allowed_values { list { s: "unidirectional" s: "bidirectional" } } } attr { name: "dropout" type: "float" default_value { f: 0.0 } } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } -} -- | cudnnRNNBackpropV2 :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 v'11 v'12 t m' . 
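-- The V2 backprop additionally threads through the opaque int8
-- __host_reserved__ tensor produced by cudnnRNNV2, so its inputs must come
-- from a matching cudnnRNNV2 forward pass rather than from cudnnRNN.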
(MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __input_h__ -> Tensor v'3 t -- ^ __input_c__ -> Tensor v'4 t -- ^ __params__ -> Tensor v'5 t -- ^ __output__ -> Tensor v'6 t -- ^ __output_h__ -> Tensor v'7 t -- ^ __output_c__ -> Tensor v'8 t -- ^ __output_backprop__ -> Tensor v'9 t -- ^ __output_h_backprop__ -> Tensor v'10 t -- ^ __output_c_backprop__ -> Tensor v'11 t -- ^ __reserve_space__ -> Tensor v'12 Data.Int.Int8 -- ^ __host_reserved__ -> m' ((Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t)) -- ^ (__input_backprop__, __input_h_backprop__, __input_c_backprop__, __params_backprop__) -- -- * __input_backprop__ -- -- * __input_h_backprop__ -- -- * __input_c_backprop__ -- -- * __params_backprop__ cudnnRNNBackpropV2 = cudnnRNNBackpropV2' id cudnnRNNBackpropV2' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 v'11 v'12 t m' . (MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __input_h__ -> Tensor v'3 t -- ^ __input_c__ -> Tensor v'4 t -- ^ __params__ -> Tensor v'5 t -- ^ __output__ -> Tensor v'6 t -- ^ __output_h__ -> Tensor v'7 t -- ^ __output_c__ -> Tensor v'8 t -- ^ __output_backprop__ -> Tensor v'9 t -- ^ __output_h_backprop__ -> Tensor v'10 t -- ^ __output_c_backprop__ -> Tensor v'11 t -- ^ __reserve_space__ -> Tensor v'12 Data.Int.Int8 -- ^ __host_reserved__ -> m' ((Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t)) -- ^ (__input_backprop__, __input_h_backprop__, __input_c_backprop__, __params_backprop__) -- -- * __input_backprop__ -- -- * __input_h_backprop__ -- -- * __input_c_backprop__ -- -- * __params_backprop__ cudnnRNNBackpropV2' op'options input input_h input_c params output output_h output_c output_backprop output_h_backprop output_c_backprop reserve_space host_reserved | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs input_h, buildInputs input_c, buildInputs params, buildInputs output, buildInputs output_h, buildInputs output_c, buildInputs output_backprop, buildInputs output_h_backprop, buildInputs output_c_backprop, buildInputs reserve_space, buildInputs host_reserved] buildOp [] (opDef "CudnnRNNBackpropV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "input_h" type_attr: "T" } input_arg { name: "input_c" type_attr: "T" } input_arg { name: "params" type_attr: "T" } input_arg { name: "output" type_attr: "T" } input_arg { name: "output_h" type_attr: "T" } input_arg { name: "output_c" type_attr: "T" } input_arg { name: "output_backprop" type_attr: "T" } input_arg { name: "output_h_backprop" type_attr: "T" } input_arg { name: "output_c_backprop" type_attr: "T" } input_arg { name: "reserve_space" type_attr: "T" } input_arg { name: "host_reserved" type: DT_INT8 } output_arg { name: "input_backprop" type_attr: "T" } output_arg { name: "input_h_backprop" type_attr: "T" } output_arg { name: "input_c_backprop" type_attr: "T" } output_arg { name: "params_backprop" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "rnn_mode" type: "string" default_value { s: "lstm" } allowed_values { list { s: "rnn_relu" s: "rnn_tanh" s: "lstm" s: "gru" } } } attr { name: "input_mode" type: "string" default_value { s: "linear_input" } allowed_values { list { s: "linear_input" s: 
"skip_input" s: "auto_select" } } } attr { name: "direction" type: "string" default_value { s: "unidirectional" } allowed_values { list { s: "unidirectional" s: "bidirectional" } } } attr { name: "dropout" type: "float" default_value { f: 0.0 } } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } -} -- | cudnnRNNBackpropV3 :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 v'11 v'12 v'13 t m' . (MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __input_h__ -> Tensor v'3 t -- ^ __input_c__ -> Tensor v'4 t -- ^ __params__ -> Tensor v'5 Data.Int.Int32 -- ^ __sequence_lengths__ -> Tensor v'6 t -- ^ __output__ -> Tensor v'7 t -- ^ __output_h__ -> Tensor v'8 t -- ^ __output_c__ -> Tensor v'9 t -- ^ __output_backprop__ -> Tensor v'10 t -- ^ __output_h_backprop__ -> Tensor v'11 t -- ^ __output_c_backprop__ -> Tensor v'12 t -- ^ __reserve_space__ -> Tensor v'13 Data.Int.Int8 -- ^ __host_reserved__ -> m' ((Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t)) -- ^ (__input_backprop__, __input_h_backprop__, __input_c_backprop__, __params_backprop__) -- -- * __input_backprop__ -- -- * __input_h_backprop__ -- -- * __input_c_backprop__ -- -- * __params_backprop__ cudnnRNNBackpropV3 = cudnnRNNBackpropV3' id cudnnRNNBackpropV3' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 v'11 v'12 v'13 t m' . (MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __input_h__ -> Tensor v'3 t -- ^ __input_c__ -> Tensor v'4 t -- ^ __params__ -> Tensor v'5 Data.Int.Int32 -- ^ __sequence_lengths__ -> Tensor v'6 t -- ^ __output__ -> Tensor v'7 t -- ^ __output_h__ -> Tensor v'8 t -- ^ __output_c__ -> Tensor v'9 t -- ^ __output_backprop__ -> Tensor v'10 t -- ^ __output_h_backprop__ -> Tensor v'11 t -- ^ __output_c_backprop__ -> Tensor v'12 t -- ^ __reserve_space__ -> Tensor v'13 Data.Int.Int8 -- ^ __host_reserved__ -> m' ((Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t)) -- ^ (__input_backprop__, __input_h_backprop__, __input_c_backprop__, __params_backprop__) -- -- * __input_backprop__ -- -- * __input_h_backprop__ -- -- * __input_c_backprop__ -- -- * __params_backprop__ cudnnRNNBackpropV3' op'options input input_h input_c params sequence_lengths output output_h output_c output_backprop output_h_backprop output_c_backprop reserve_space host_reserved | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs input_h, buildInputs input_c, buildInputs params, buildInputs sequence_lengths, buildInputs output, buildInputs output_h, buildInputs output_c, buildInputs output_backprop, buildInputs output_h_backprop, buildInputs output_c_backprop, buildInputs reserve_space, buildInputs host_reserved] buildOp [] (opDef "CudnnRNNBackpropV3" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "input_h" type_attr: "T" } input_arg { name: "input_c" type_attr: "T" } input_arg { name: "params" type_attr: "T" } input_arg { name: "sequence_lengths" type: DT_INT32 } input_arg { name: "output" type_attr: "T" } input_arg { name: "output_h" type_attr: "T" } input_arg { name: "output_c" type_attr: "T" } input_arg { name: "output_backprop" type_attr: "T" } input_arg { name: "output_h_backprop" type_attr: "T" } input_arg { name: "output_c_backprop" type_attr: "T" } input_arg { name: 
"reserve_space" type_attr: "T" } input_arg { name: "host_reserved" type: DT_INT8 } output_arg { name: "input_backprop" type_attr: "T" } output_arg { name: "input_h_backprop" type_attr: "T" } output_arg { name: "input_c_backprop" type_attr: "T" } output_arg { name: "params_backprop" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "rnn_mode" type: "string" default_value { s: "lstm" } allowed_values { list { s: "rnn_relu" s: "rnn_tanh" s: "lstm" s: "gru" } } } attr { name: "input_mode" type: "string" default_value { s: "linear_input" } allowed_values { list { s: "linear_input" s: "skip_input" s: "auto_select" } } } attr { name: "direction" type: "string" default_value { s: "unidirectional" } allowed_values { list { s: "unidirectional" s: "bidirectional" } } } attr { name: "dropout" type: "float" default_value { f: 0.0 } } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } attr { name: "time_major" type: "bool" default_value { b: true } } -} -- | cudnnRNNCanonicalToParams :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 Data.Int.Int32 -- ^ __num_layers__ -> Tensor v'2 Data.Int.Int32 -- ^ __num_units__ -> Tensor v'3 Data.Int.Int32 -- ^ __input_size__ -> [Tensor v'4 t] -- ^ __weights__ -> [Tensor v'5 t] -- ^ __biases__ -> Tensor Build t -- ^ __params__ cudnnRNNCanonicalToParams = cudnnRNNCanonicalToParams' id cudnnRNNCanonicalToParams' :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __num_layers__ -> Tensor v'2 Data.Int.Int32 -- ^ __num_units__ -> Tensor v'3 Data.Int.Int32 -- ^ __input_size__ -> [Tensor v'4 t] -- ^ __weights__ -> [Tensor v'5 t] -- ^ __biases__ -> Tensor Build t -- ^ __params__ cudnnRNNCanonicalToParams' op'options num_layers num_units input_size weights biases | eqLengthGuard [("num_params", [("weights", length weights), ("biases", length biases)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs num_layers, buildInputs num_units, buildInputs input_size, buildInputs weights, buildInputs biases] return (opDef "CudnnRNNCanonicalToParams" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "num_params" .~ num_params & op'options & opInputs .~ op'inputs) where num_params = fromIntegral (length weights) :: Int64 {- input_arg { name: "num_layers" type: DT_INT32 } input_arg { name: "num_units" type: DT_INT32 } input_arg { name: "input_size" type: DT_INT32 } input_arg { name: "weights" type_attr: "T" number_attr: "num_params" } input_arg { name: "biases" type_attr: "T" number_attr: "num_params" } output_arg { name: "params" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "num_params" type: "int" has_minimum: true minimum: 1 } attr { name: "rnn_mode" type: "string" default_value { s: "lstm" } allowed_values { list { s: "rnn_relu" s: "rnn_tanh" s: "lstm" s: "gru" } } } attr { name: "input_mode" type: "string" default_value { s: "linear_input" } allowed_values { list { s: "linear_input" s: "skip_input" s: "auto_select" } } } attr { name: "direction" type: "string" default_value { s: "unidirectional" } allowed_values { list { s: "unidirectional" s: "bidirectional" } } } attr { name: "dropout" type: "float" default_value { f: 0.0 } } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: 
"seed2" type: "int" default_value { i: 0 } } -} -- | cudnnRNNParamsSize :: forall v'1 v'2 v'3 s . (OneOf '[Data.Int.Int32, Data.Int.Int64] s) => DataType -- ^ __T__ -> Tensor v'1 Data.Int.Int32 -- ^ __num_layers__ -> Tensor v'2 Data.Int.Int32 -- ^ __num_units__ -> Tensor v'3 Data.Int.Int32 -- ^ __input_size__ -> Tensor Build s -- ^ __params_size__ cudnnRNNParamsSize = cudnnRNNParamsSize' id cudnnRNNParamsSize' :: forall v'1 v'2 v'3 s . (OneOf '[Data.Int.Int32, Data.Int.Int64] s) => OpParams -> DataType -- ^ __T__ -> Tensor v'1 Data.Int.Int32 -- ^ __num_layers__ -> Tensor v'2 Data.Int.Int32 -- ^ __num_units__ -> Tensor v'3 Data.Int.Int32 -- ^ __input_size__ -> Tensor Build s -- ^ __params_size__ cudnnRNNParamsSize' op'options t num_layers num_units input_size | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs num_layers, buildInputs num_units, buildInputs input_size] return (opDef "CudnnRNNParamsSize" & opAttr "S" .~ tensorType (undefined :: s) & opAttr "T" .~ t & op'options & opInputs .~ op'inputs) {- input_arg { name: "num_layers" type: DT_INT32 } input_arg { name: "num_units" type: DT_INT32 } input_arg { name: "input_size" type: DT_INT32 } output_arg { name: "params_size" type_attr: "S" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "S" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "rnn_mode" type: "string" default_value { s: "lstm" } allowed_values { list { s: "rnn_relu" s: "rnn_tanh" s: "lstm" s: "gru" } } } attr { name: "input_mode" type: "string" default_value { s: "linear_input" } allowed_values { list { s: "linear_input" s: "skip_input" s: "auto_select" } } } attr { name: "direction" type: "string" default_value { s: "unidirectional" } allowed_values { list { s: "unidirectional" s: "bidirectional" } } } attr { name: "dropout" type: "float" default_value { f: 0.0 } } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } -} -- | cudnnRNNParamsToCanonical :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Data.Int.Int64 -- ^ __num_params__ -> Tensor v'1 Data.Int.Int32 -- ^ __num_layers__ -> Tensor v'2 Data.Int.Int32 -- ^ __num_units__ -> Tensor v'3 Data.Int.Int32 -- ^ __input_size__ -> Tensor v'4 t -- ^ __params__ -> ([Tensor Build t], [Tensor Build t]) -- ^ (__weights__, __biases__) -- -- * __weights__ -- -- * __biases__ cudnnRNNParamsToCanonical = cudnnRNNParamsToCanonical' id cudnnRNNParamsToCanonical' :: forall v'1 v'2 v'3 v'4 t . 
(OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Data.Int.Int64 -- ^ __num_params__ -> Tensor v'1 Data.Int.Int32 -- ^ __num_layers__ -> Tensor v'2 Data.Int.Int32 -- ^ __num_units__ -> Tensor v'3 Data.Int.Int32 -- ^ __input_size__ -> Tensor v'4 t -- ^ __params__ -> ([Tensor Build t], [Tensor Build t]) -- ^ (__weights__, __biases__) -- -- * __weights__ -- -- * __biases__ cudnnRNNParamsToCanonical' op'options num_params num_layers num_units input_size params | eqLengthGuard [] = pureOp [num_params, num_params] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs num_layers, buildInputs num_units, buildInputs input_size, buildInputs params] return (opDef "CudnnRNNParamsToCanonical" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "num_params" .~ num_params & op'options & opInputs .~ op'inputs) {- input_arg { name: "num_layers" type: DT_INT32 } input_arg { name: "num_units" type: DT_INT32 } input_arg { name: "input_size" type: DT_INT32 } input_arg { name: "params" type_attr: "T" } output_arg { name: "weights" type_attr: "T" number_attr: "num_params" } output_arg { name: "biases" type_attr: "T" number_attr: "num_params" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "num_params" type: "int" has_minimum: true minimum: 1 } attr { name: "rnn_mode" type: "string" default_value { s: "lstm" } allowed_values { list { s: "rnn_relu" s: "rnn_tanh" s: "lstm" s: "gru" } } } attr { name: "input_mode" type: "string" default_value { s: "linear_input" } allowed_values { list { s: "linear_input" s: "skip_input" s: "auto_select" } } } attr { name: "direction" type: "string" default_value { s: "unidirectional" } allowed_values { list { s: "unidirectional" s: "bidirectional" } } } attr { name: "dropout" type: "float" default_value { f: 0.0 } } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } -} -- | cudnnRNNV2 :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __input_h__ -> Tensor v'3 t -- ^ __input_c__ -> Tensor v'4 t -- ^ __params__ -> m' ((Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value Data.Int.Int8)) -- ^ (__output__, __output_h__, __output_c__, __reserve_space__, __host_reserved__) -- -- * __output__ -- -- * __output_h__ -- -- * __output_c__ -- -- * __reserve_space__ -- -- * __host_reserved__ cudnnRNNV2 = cudnnRNNV2' id cudnnRNNV2' :: forall v'1 v'2 v'3 v'4 t m' . 
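-- For cudnnRNNParamsToCanonical the caller supplies __num_params__ up
-- front: it is sent as the "num_params" attr and also fixes the static
-- length of both returned lists (note the `pureOp [num_params, num_params]`
-- above). A sketch (inputs hypothetical):
--
-- > (weights, biases) =
-- >     cudnnRNNParamsToCanonical 8 numLayers numUnits inputSize params
-- > -- length weights == 8 && length biases == 8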
(MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __input_h__ -> Tensor v'3 t -- ^ __input_c__ -> Tensor v'4 t -- ^ __params__ -> m' ((Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value Data.Int.Int8)) -- ^ (__output__, __output_h__, __output_c__, __reserve_space__, __host_reserved__) -- -- * __output__ -- -- * __output_h__ -- -- * __output_c__ -- -- * __reserve_space__ -- -- * __host_reserved__ cudnnRNNV2' op'options input input_h input_c params | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs input_h, buildInputs input_c, buildInputs params] buildOp [] (opDef "CudnnRNNV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "input_h" type_attr: "T" } input_arg { name: "input_c" type_attr: "T" } input_arg { name: "params" type_attr: "T" } output_arg { name: "output" type_attr: "T" } output_arg { name: "output_h" type_attr: "T" } output_arg { name: "output_c" type_attr: "T" } output_arg { name: "reserve_space" type_attr: "T" } output_arg { name: "host_reserved" type: DT_INT8 } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "rnn_mode" type: "string" default_value { s: "lstm" } allowed_values { list { s: "rnn_relu" s: "rnn_tanh" s: "lstm" s: "gru" } } } attr { name: "input_mode" type: "string" default_value { s: "linear_input" } allowed_values { list { s: "linear_input" s: "skip_input" s: "auto_select" } } } attr { name: "direction" type: "string" default_value { s: "unidirectional" } allowed_values { list { s: "unidirectional" s: "bidirectional" } } } attr { name: "dropout" type: "float" default_value { f: 0.0 } } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } attr { name: "is_training" type: "bool" default_value { b: true } } -} -- | cudnnRNNV3 :: forall v'1 v'2 v'3 v'4 v'5 t m' . (MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __input_h__ -> Tensor v'3 t -- ^ __input_c__ -> Tensor v'4 t -- ^ __params__ -> Tensor v'5 Data.Int.Int32 -- ^ __sequence_lengths__ -> m' ((Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value Data.Int.Int8)) -- ^ (__output__, __output_h__, __output_c__, __reserve_space__, __host_reserved__) -- -- * __output__ -- -- * __output_h__ -- -- * __output_c__ -- -- * __reserve_space__ -- -- * __host_reserved__ cudnnRNNV3 = cudnnRNNV3' id cudnnRNNV3' :: forall v'1 v'2 v'3 v'4 v'5 t m' . 
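-- cudnnRNNV3 adds a per-example __sequence_lengths__ input and honours a
-- "time_major" attr (default true). A sketch of a batch-major call (input
-- tensors hypothetical):
--
-- > (out, outH, outC, reserve, hostReserved) <-
-- >   cudnnRNNV3' (opAttr "time_major" .~ False)
-- >               input inputH inputC params seqLens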
(MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __input_h__ -> Tensor v'3 t -- ^ __input_c__ -> Tensor v'4 t -- ^ __params__ -> Tensor v'5 Data.Int.Int32 -- ^ __sequence_lengths__ -> m' ((Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value t, Tensor Value Data.Int.Int8)) -- ^ (__output__, __output_h__, __output_c__, __reserve_space__, __host_reserved__) -- -- * __output__ -- -- * __output_h__ -- -- * __output_c__ -- -- * __reserve_space__ -- -- * __host_reserved__ cudnnRNNV3' op'options input input_h input_c params sequence_lengths | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs input_h, buildInputs input_c, buildInputs params, buildInputs sequence_lengths] buildOp [] (opDef "CudnnRNNV3" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "input_h" type_attr: "T" } input_arg { name: "input_c" type_attr: "T" } input_arg { name: "params" type_attr: "T" } input_arg { name: "sequence_lengths" type: DT_INT32 } output_arg { name: "output" type_attr: "T" } output_arg { name: "output_h" type_attr: "T" } output_arg { name: "output_c" type_attr: "T" } output_arg { name: "reserve_space" type_attr: "T" } output_arg { name: "host_reserved" type: DT_INT8 } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "rnn_mode" type: "string" default_value { s: "lstm" } allowed_values { list { s: "rnn_relu" s: "rnn_tanh" s: "lstm" s: "gru" } } } attr { name: "input_mode" type: "string" default_value { s: "linear_input" } allowed_values { list { s: "linear_input" s: "skip_input" s: "auto_select" } } } attr { name: "direction" type: "string" default_value { s: "unidirectional" } allowed_values { list { s: "unidirectional" s: "bidirectional" } } } attr { name: "dropout" type: "float" default_value { f: 0.0 } } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } attr { name: "is_training" type: "bool" default_value { b: true } } attr { name: "time_major" type: "bool" default_value { b: true } } -} -- | cumprod :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 tidx -- ^ __axis__ -> Tensor Build t -- ^ __out__ cumprod = cumprod' id cumprod' :: forall v'1 v'2 t tidx . 
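-- cumprod and the cumsum op below share the boolean "exclusive" and
-- "reverse" attrs (both default false). A sketch of an exclusive,
-- right-to-left product along axis 0, assuming `scalar` from
-- TensorFlow.Ops:
--
-- > out = cumprod' ( (opAttr "exclusive" .~ True)
-- >                . (opAttr "reverse" .~ True) )
-- >                x (scalar (0 :: Int32))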
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 tidx -- ^ __axis__ -> Tensor Build t -- ^ __out__ cumprod' op'options x axis | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs axis] return (opDef "Cumprod" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "axis" type_attr: "Tidx" } output_arg { name: "out" type_attr: "T" } attr { name: "exclusive" type: "bool" default_value { b: false } } attr { name: "reverse" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | cumsum :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 tidx -- ^ __axis__ -> Tensor Build t -- ^ __out__ cumsum = cumsum' id cumsum' :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 tidx -- ^ __axis__ -> Tensor Build t -- ^ __out__ cumsum' op'options x axis | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs axis] return (opDef "Cumsum" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "axis" type_attr: "Tidx" } output_arg { name: "out" type_attr: "T" } attr { name: "exclusive" type: "bool" default_value { b: false } } attr { name: "reverse" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | dataFormatDimMap :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ dataFormatDimMap = dataFormatDimMap' id dataFormatDimMap' :: forall v'1 t . 
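-- dataFormatDimMap translates dimension indices between the layouts named
-- by its "src_format" (default "NHWC") and "dst_format" (default "NCHW")
-- attrs; with the defaults, x = [0, 1, 2, 3] maps to y = [0, 2, 3, 1],
-- since the channel axis moves from position 3 to position 1.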
(OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ dataFormatDimMap' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "DataFormatDimMap" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "src_format" type: "string" default_value { s: "NHWC" } } attr { name: "dst_format" type: "string" default_value { s: "NCHW" } } -} -- | dataFormatVecPermute :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ dataFormatVecPermute = dataFormatVecPermute' id dataFormatVecPermute' :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ dataFormatVecPermute' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "DataFormatVecPermute" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "src_format" type: "string" default_value { s: "NHWC" } } attr { name: "dst_format" type: "string" default_value { s: "NCHW" } } -} -- | datasetToGraph :: Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor Build Data.ByteString.ByteString -- ^ __graph__ datasetToGraph = datasetToGraph' id datasetToGraph' :: OpParams -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor Build Data.ByteString.ByteString -- ^ __graph__ datasetToGraph' op'options input_dataset | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset] return (opDef "DatasetToGraph" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } output_arg { name: "graph" type: DT_STRING } -} -- | datasetToSingleElement :: forall v'1 output_types m' . (MonadBuild m', TensorTypes output_types) => Tensor v'1 Variant -- ^ __dataset__ -> m' (TensorList (Value) output_types) -- ^ __components__ datasetToSingleElement = datasetToSingleElement' id datasetToSingleElement' :: forall v'1 output_types m' . (MonadBuild m', TensorTypes output_types) => OpParams -> Tensor v'1 Variant -- ^ __dataset__ -> m' (TensorList (Value) output_types) -- ^ __components__ datasetToSingleElement' op'options dataset | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs dataset] buildOp [] (opDef "DatasetToSingleElement" & opAttr "output_types" .~ fromTensorTypes (Proxy :: Proxy output_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "dataset" type: DT_VARIANT } output_arg { name: "components" type_list_attr: "output_types" } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | debugGradientIdentity :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ debugGradientIdentity = debugGradientIdentity' id debugGradientIdentity' :: forall v'1 t . 
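-- For datasetToSingleElement the "output_types" attr is derived from the
-- TensorTypes instance, so the element types are fixed by a type annotation
-- at the call site. A sketch, assuming a __dataset__ variant handle built
-- by the dataset ops and the Session monad from TensorFlow.Session:
--
-- > components <- datasetToSingleElement dataset
-- >   :: Session (TensorList Value '[Float, ByteString])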
(TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ debugGradientIdentity' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "DebugGradientIdentity" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } -} -- | debugGradientRefIdentity :: forall t m' . (MonadBuild m', TensorType t) => Tensor Ref t -- ^ __input__ -> m' (Tensor Ref t) -- ^ __output__ debugGradientRefIdentity = debugGradientRefIdentity' id debugGradientRefIdentity' :: forall t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -- ^ __input__ -> m' (Tensor Ref t) -- ^ __output__ debugGradientRefIdentity' op'options input | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "DebugGradientRefIdentity" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" is_ref: true } output_arg { name: "output" type_attr: "T" is_ref: true } attr { name: "T" type: "type" } -} -- | Debug Identity Op. -- -- Provides an identity mapping of the non-Ref type input tensor for debugging. debugIdentity :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __input__: Input tensor, non-Reference type. -> Tensor Build t -- ^ __output__: Output tensor that equals the input tensor. debugIdentity = debugIdentity' id debugIdentity' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__: Input tensor, non-Reference type. -> Tensor Build t -- ^ __output__: Output tensor that equals the input tensor. debugIdentity' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "DebugIdentity" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "Input tensor, non-Reference type." type_attr: "T" } output_arg { name: "output" description: "Output tensor that equals the input tensor." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "device_name" type: "string" default_value { s: "" } } attr { name: "tensor_name" type: "string" default_value { s: "" } description: "Name of the input tensor." } attr { name: "debug_urls" type: "list(string)" default_value { list { } } description: "List of URLs to debug targets, e.g.,\nfile:///foo/tfdbg_dump, grpc:://localhost:11011" } attr { name: "gated_grpc" type: "bool" default_value { b: false } description: "Whether this op will be gated. If any of the debug_urls of this\ndebug node is of the grpc:// scheme, when the value of this attribute is set\nto True, the data will not actually be sent via the grpc stream unless this\ndebug op has been enabled at the debug_url. If all of the debug_urls of this\ndebug node are of the grpc:// scheme and the debug op is enabled at none of\nthem, the output will be an empty Tensor." } -} -- | Debug NaN Value Counter Op -- -- Counts number of NaNs in the input tensor, for debugging. debugNanCount :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __input__: Input tensor, non-Reference type. -> Tensor Build Data.Int.Int64 -- ^ __output__: An integer output tensor that is the number of NaNs in the input. debugNanCount = debugNanCount' id debugNanCount' :: forall v'1 t . 
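-- A sketch of debugNanCount for counting NaNs in an intermediate tensor
-- (the `logits` name is hypothetical):
--
-- > nanCount = debugNanCount logits :: Tensor Build Int64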
(TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__: Input tensor, non-Reference type. -> Tensor Build Data.Int.Int64 -- ^ __output__: An integer output tensor that is the number of NaNs in the input. debugNanCount' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "DebugNanCount" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "Input tensor, non-Reference type." type_attr: "T" } output_arg { name: "output" description: "An integer output tensor that is the number of NaNs in the input." type: DT_INT64 } attr { name: "T" type: "type" } attr { name: "device_name" type: "string" default_value { s: "" } } attr { name: "tensor_name" type: "string" default_value { s: "" } description: "Name of the input tensor." } attr { name: "debug_urls" type: "list(string)" default_value { list { } } description: "List of URLs to debug targets, e.g.,\nfile:///foo/tfdbg_dump, grpc:://localhost:11011." } attr { name: "gated_grpc" type: "bool" default_value { b: false } description: "Whether this op will be gated. If any of the debug_urls of this\ndebug node is of the grpc:// scheme, when the value of this attribute is set\nto True, the data will not actually be sent via the grpc stream unless this\ndebug op has been enabled at the debug_url. If all of the debug_urls of this\ndebug node are of the grpc:// scheme and the debug op is enabled at none of\nthem, the output will be an empty Tensor." } -} -- | Debug Numeric Summary Op. -- -- Provide a basic summary of numeric value types, range and distribution. debugNumericSummary :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __input__: Input tensor, non-Reference type, float or double. -> Tensor Build Double -- ^ __output__: A double tensor of shape [14 + nDimensions], where nDimensions is the -- the number of dimensions of the tensor's shape. The elements of output are: -- [0]: is initialized (1.0) or not (0.0). -- [1]: total number of elements -- [2]: NaN element count -- [3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by -- default. -- [4]: negative element count (excluding -inf), if lower_bound is the default -- -inf. Otherwise, this is the count of elements > lower_bound and < 0. -- [5]: zero element count -- [6]: positive element count (excluding +inf), if upper_bound is the default -- -inf. Otherwise, this is the count of elements < upper_bound and > 0. -- [7]: generalized +inf count, elements >= upper_bound. upper_bound is +inf by -- default. -- Output elements [1:8] are all zero, if the tensor is uninitialized. -- [8]: minimum of all non-inf and non-NaN elements. -- If uninitialized or no such element exists: +inf. -- [9]: maximum of all non-inf and non-NaN elements. -- If uninitialized or no such element exists: -inf. -- [10]: mean of all non-inf and non-NaN elements. -- If uninitialized or no such element exists: NaN. -- [11]: variance of all non-inf and non-NaN elements. -- If uninitialized or no such element exists: NaN. -- [12]: Data type of the tensor encoded as an enum integer. See the DataType -- proto for more details. -- [13]: Number of dimensions of the tensor (ndims). -- [14+]: Sizes of the dimensions. debugNumericSummary = debugNumericSummary' id debugNumericSummary' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__: Input tensor, non-Reference type, float or double. 
-> Tensor Build Double -- ^ __output__: A double tensor of shape [14 + nDimensions], where nDimensions is the -- the number of dimensions of the tensor's shape. The elements of output are: -- [0]: is initialized (1.0) or not (0.0). -- [1]: total number of elements -- [2]: NaN element count -- [3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by -- default. -- [4]: negative element count (excluding -inf), if lower_bound is the default -- -inf. Otherwise, this is the count of elements > lower_bound and < 0. -- [5]: zero element count -- [6]: positive element count (excluding +inf), if upper_bound is the default -- -inf. Otherwise, this is the count of elements < upper_bound and > 0. -- [7]: generalized +inf count, elements >= upper_bound. upper_bound is +inf by -- default. -- Output elements [1:8] are all zero, if the tensor is uninitialized. -- [8]: minimum of all non-inf and non-NaN elements. -- If uninitialized or no such element exists: +inf. -- [9]: maximum of all non-inf and non-NaN elements. -- If uninitialized or no such element exists: -inf. -- [10]: mean of all non-inf and non-NaN elements. -- If uninitialized or no such element exists: NaN. -- [11]: variance of all non-inf and non-NaN elements. -- If uninitialized or no such element exists: NaN. -- [12]: Data type of the tensor encoded as an enum integer. See the DataType -- proto for more details. -- [13]: Number of dimensions of the tensor (ndims). -- [14+]: Sizes of the dimensions. debugNumericSummary' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "DebugNumericSummary" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "Input tensor, non-Reference type, float or double." type_attr: "T" } output_arg { name: "output" description: "A double tensor of shape [14 + nDimensions], where nDimensions is the\n the number of dimensions of the tensor\'s shape. The elements of output are:\n [0]: is initialized (1.0) or not (0.0).\n [1]: total number of elements\n [2]: NaN element count\n [3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by\n default.\n [4]: negative element count (excluding -inf), if lower_bound is the default\n -inf. Otherwise, this is the count of elements > lower_bound and < 0.\n [5]: zero element count\n [6]: positive element count (excluding +inf), if upper_bound is the default\n -inf. Otherwise, this is the count of elements < upper_bound and > 0.\n [7]: generalized +inf count, elements >= upper_bound. upper_bound is +inf by\n default.\nOutput elements [1:8] are all zero, if the tensor is uninitialized.\n [8]: minimum of all non-inf and non-NaN elements.\n If uninitialized or no such element exists: +inf.\n [9]: maximum of all non-inf and non-NaN elements.\n If uninitialized or no such element exists: -inf.\n [10]: mean of all non-inf and non-NaN elements.\n If uninitialized or no such element exists: NaN.\n [11]: variance of all non-inf and non-NaN elements.\n If uninitialized or no such element exists: NaN.\n [12]: Data type of the tensor encoded as an enum integer. See the DataType\n proto for more details.\n [13]: Number of dimensions of the tensor (ndims).\n [14+]: Sizes of the dimensions." 
type: DT_DOUBLE } attr { name: "T" type: "type" } attr { name: "device_name" type: "string" default_value { s: "" } } attr { name: "tensor_name" type: "string" default_value { s: "" } description: "Name of the input tensor." } attr { name: "debug_urls" type: "list(string)" default_value { list { } } description: "List of URLs to debug targets, e.g.,\nfile:///foo/tfdbg_dump, grpc:://localhost:11011" } attr { name: "lower_bound" type: "float" default_value { f: -Infinity } description: "(float) The lower bound <= which values will be included in the\ngeneralized -inf count. Default: -inf." } attr { name: "upper_bound" type: "float" default_value { f: Infinity } description: "(float) The upper bound >= which values will be included in the\ngeneralized +inf count. Default: +inf." } attr { name: "mute_if_healthy" type: "bool" default_value { b: false } description: "(bool) Do not send data to the debug URLs unless at least one\nof elements [2], [3] and [7] (i.e., the nan count and the generalized -inf and\ninf counts) is non-zero." } attr { name: "gated_grpc" type: "bool" default_value { b: false } description: "Whether this op will be gated. If any of the debug_urls of this\ndebug node is of the grpc:// scheme, when the value of this attribute is set\nto True, the data will not actually be sent via the grpc stream unless this\ndebug op has been enabled at the debug_url. If all of the debug_urls of this\ndebug node are of the grpc:// scheme and the debug op is enabled at none of\nthem, the output will be an empty Tensor." } -} -- | decodeAndCropJpeg :: Tensor v'1 Data.ByteString.ByteString -- ^ __contents__ -> Tensor v'2 Data.Int.Int32 -- ^ __crop_window__ -> Tensor Build Data.Word.Word8 -- ^ __image__ decodeAndCropJpeg = decodeAndCropJpeg' id decodeAndCropJpeg' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __contents__ -> Tensor v'2 Data.Int.Int32 -- ^ __crop_window__ -> Tensor Build Data.Word.Word8 -- ^ __image__ decodeAndCropJpeg' op'options contents crop_window | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs contents, buildInputs crop_window] return (opDef "DecodeAndCropJpeg" & op'options & opInputs .~ op'inputs) {- input_arg { name: "contents" type: DT_STRING } input_arg { name: "crop_window" type: DT_INT32 } output_arg { name: "image" type: DT_UINT8 } attr { name: "channels" type: "int" default_value { i: 0 } } attr { name: "ratio" type: "int" default_value { i: 1 } } attr { name: "fancy_upscaling" type: "bool" default_value { b: true } } attr { name: "try_recover_truncated" type: "bool" default_value { b: false } } attr { name: "acceptable_fraction" type: "float" default_value { f: 1.0 } } attr { name: "dct_method" type: "string" default_value { s: "" } } -} -- | decodeBase64 :: Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ decodeBase64 = decodeBase64' id decodeBase64' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ decodeBase64' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "DecodeBase64" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_STRING } output_arg { name: "output" type: DT_STRING } -} -- | decodeBmp :: Tensor v'1 Data.ByteString.ByteString -- ^ __contents__ -> Tensor Build Data.Word.Word8 -- ^ __image__ decodeBmp = decodeBmp' id decodeBmp' :: OpParams -> 
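-- For decodeAndCropJpeg the crop window is a length-4 Int32 vector
-- [crop_y, crop_x, crop_height, crop_width], and channels = 0 (the
-- default) means "as encoded".  Sketch, with a hypothetical 'jpegBytes'
-- holding the raw file contents:
--
-- > cropped = decodeAndCropJpeg'
-- >             (opAttr "channels" .~ (3 :: Int64))
-- >             (constant (Shape []) [jpegBytes])
-- >             (constant (Shape [4]) [10, 10, 64, 64 :: Int32])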
Tensor v'1 Data.ByteString.ByteString -- ^ __contents__ -> Tensor Build Data.Word.Word8 -- ^ __image__ decodeBmp' op'options contents | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs contents] return (opDef "DecodeBmp" & op'options & opInputs .~ op'inputs) {- input_arg { name: "contents" type: DT_STRING } output_arg { name: "image" type: DT_UINT8 } attr { name: "channels" type: "int" default_value { i: 0 } } -} -- | decodeCSV :: forall v'1 v'2 oUT_TYPE . (OneOfs '[Data.ByteString.ByteString, Data.Int.Int32, Data.Int.Int64, Double, Float] oUT_TYPE) => Tensor v'1 Data.ByteString.ByteString -- ^ __records__ -> TensorList (v'2) oUT_TYPE -- ^ __record_defaults__ -> TensorList (Build) oUT_TYPE -- ^ __output__ decodeCSV = decodeCSV' id decodeCSV' :: forall v'1 v'2 oUT_TYPE . (OneOfs '[Data.ByteString.ByteString, Data.Int.Int32, Data.Int.Int64, Double, Float] oUT_TYPE) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __records__ -> TensorList (v'2) oUT_TYPE -- ^ __record_defaults__ -> TensorList (Build) oUT_TYPE -- ^ __output__ decodeCSV' op'options records record_defaults | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs records, buildInputs record_defaults] return (opDef "DecodeCSV" & opAttr "OUT_TYPE" .~ fromTensorTypes (Proxy :: Proxy oUT_TYPE) & op'options & opInputs .~ op'inputs) {- input_arg { name: "records" type: DT_STRING } input_arg { name: "record_defaults" type_list_attr: "OUT_TYPE" } output_arg { name: "output" type_list_attr: "OUT_TYPE" } attr { name: "OUT_TYPE" type: "list(type)" has_minimum: true minimum: 1 allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_STRING } } } attr { name: "field_delim" type: "string" default_value { s: "," } } attr { name: "use_quote_delim" type: "bool" default_value { b: true } } attr { name: "na_value" type: "string" default_value { s: "" } } attr { name: "select_cols" type: "list(int)" default_value { list { } } } -} -- | decodeCompressed :: Tensor v'1 Data.ByteString.ByteString -- ^ __bytes__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ decodeCompressed = decodeCompressed' id decodeCompressed' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __bytes__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ decodeCompressed' op'options bytes | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs bytes] return (opDef "DecodeCompressed" & op'options & opInputs .~ op'inputs) {- input_arg { name: "bytes" type: DT_STRING } output_arg { name: "output" type: DT_STRING } attr { name: "compression_type" type: "string" default_value { s: "" } } -} -- | decodeGif :: Tensor v'1 Data.ByteString.ByteString -- ^ __contents__ -> Tensor Build Data.Word.Word8 -- ^ __image__ decodeGif = decodeGif' id decodeGif' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __contents__ -> Tensor Build Data.Word.Word8 -- ^ __image__ decodeGif' op'options contents | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs contents] return (opDef "DecodeGif" & op'options & opInputs .~ op'inputs) {- input_arg { name: "contents" type: DT_STRING } output_arg { name: "image" type: DT_UINT8 } -} -- | decodeJSONExample :: Tensor v'1 Data.ByteString.ByteString -- ^ __json_examples__ -> Tensor Build Data.ByteString.ByteString -- ^ __binary_examples__ decodeJSONExample = decodeJSONExample' id decodeJSONExample' :: 
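-- decodeCSV's record_defaults (and its result) are heterogeneous
-- 'TensorList's, assembled with the 'Nil' and '(:/)' constructors from
-- TensorFlow.Types.  A sketch for records with one Int32 column and one
-- string column ('records' being a hypothetical tensor of CSV lines):
--
-- > let defaults = constant (Shape [1]) [0 :: Int32]
-- >             :/ constant (Shape [1]) ["" :: ByteString]
-- >             :/ Nil
-- > case decodeCSV records defaults of
-- >   ints :/ names :/ Nil -> ...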
OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __json_examples__ -> Tensor Build Data.ByteString.ByteString -- ^ __binary_examples__ decodeJSONExample' op'options json_examples | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs json_examples] return (opDef "DecodeJSONExample" & op'options & opInputs .~ op'inputs) {- input_arg { name: "json_examples" type: DT_STRING } output_arg { name: "binary_examples" type: DT_STRING } -} -- | decodeJpeg :: Tensor v'1 Data.ByteString.ByteString -- ^ __contents__ -> Tensor Build Data.Word.Word8 -- ^ __image__ decodeJpeg = decodeJpeg' id decodeJpeg' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __contents__ -> Tensor Build Data.Word.Word8 -- ^ __image__ decodeJpeg' op'options contents | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs contents] return (opDef "DecodeJpeg" & op'options & opInputs .~ op'inputs) {- input_arg { name: "contents" type: DT_STRING } output_arg { name: "image" type: DT_UINT8 } attr { name: "channels" type: "int" default_value { i: 0 } } attr { name: "ratio" type: "int" default_value { i: 1 } } attr { name: "fancy_upscaling" type: "bool" default_value { b: true } } attr { name: "try_recover_truncated" type: "bool" default_value { b: false } } attr { name: "acceptable_fraction" type: "float" default_value { f: 1.0 } } attr { name: "dct_method" type: "string" default_value { s: "" } } -} -- | decodePaddedRaw :: forall v'1 v'2 out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] out_type) => Tensor v'1 Data.ByteString.ByteString -- ^ __input_bytes__ -> Tensor v'2 Data.Int.Int32 -- ^ __fixed_length__ -> Tensor Build out_type -- ^ __output__ decodePaddedRaw = decodePaddedRaw' id decodePaddedRaw' :: forall v'1 v'2 out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] out_type) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input_bytes__ -> Tensor v'2 Data.Int.Int32 -- ^ __fixed_length__ -> Tensor Build out_type -- ^ __output__ decodePaddedRaw' op'options input_bytes fixed_length | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_bytes, buildInputs fixed_length] return (opDef "DecodePaddedRaw" & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_bytes" type: DT_STRING } input_arg { name: "fixed_length" type: DT_INT32 } output_arg { name: "output" type_attr: "out_type" } attr { name: "out_type" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT16 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 } } } attr { name: "little_endian" type: "bool" default_value { b: true } } -} -- | decodePng :: forall v'1 dtype . (OneOf '[Data.Word.Word16, Data.Word.Word8] dtype) => Tensor v'1 Data.ByteString.ByteString -- ^ __contents__ -> Tensor Build dtype -- ^ __image__ decodePng = decodePng' id decodePng' :: forall v'1 dtype . 
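-- decodePaddedRaw reinterprets each input string as fixed_length bytes of
-- out_type (zero-padding shorter strings); the result's type annotation
-- selects the "out_type" attr, and "little_endian" (default true) sets the
-- byte order.  Sketch with a hypothetical 'bytesTensor':
--
-- > ints = decodePaddedRaw' (opAttr "little_endian" .~ False)
-- >                         bytesTensor
-- >                         (constant (Shape []) [8 :: Int32])
-- >          :: Tensor Build Int16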
(OneOf '[Data.Word.Word16, Data.Word.Word8] dtype) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __contents__ -> Tensor Build dtype -- ^ __image__ decodePng' op'options contents | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs contents] return (opDef "DecodePng" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "contents" type: DT_STRING } output_arg { name: "image" type_attr: "dtype" } attr { name: "channels" type: "int" default_value { i: 0 } } attr { name: "dtype" type: "type" default_value { type: DT_UINT8 } allowed_values { list { type: DT_UINT8 type: DT_UINT16 } } } -} -- | decodeProtoV2 :: forall v'1 output_types . (TensorTypes output_types) => Tensor v'1 Data.ByteString.ByteString -- ^ __bytes__ -> (Tensor Build Data.Int.Int32, TensorList (Build) output_types) -- ^ (__sizes__, __values__) -- -- * __sizes__ -- -- * __values__ decodeProtoV2 = decodeProtoV2' id decodeProtoV2' :: forall v'1 output_types . (TensorTypes output_types) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __bytes__ -> (Tensor Build Data.Int.Int32, TensorList (Build) output_types) -- ^ (__sizes__, __values__) -- -- * __sizes__ -- -- * __values__ decodeProtoV2' op'options bytes | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs bytes] return (opDef "DecodeProtoV2" & opAttr "output_types" .~ fromTensorTypes (Proxy :: Proxy output_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "bytes" type: DT_STRING } output_arg { name: "sizes" type: DT_INT32 } output_arg { name: "values" type_list_attr: "output_types" } attr { name: "message_type" type: "string" } attr { name: "field_names" type: "list(string)" } attr { name: "output_types" type: "list(type)" has_minimum: true } attr { name: "descriptor_source" type: "string" default_value { s: "local://" } } attr { name: "message_format" type: "string" default_value { s: "binary" } } attr { name: "sanitize" type: "bool" default_value { b: false } } -} -- | decodeRaw :: forall v'1 out_type . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] out_type) => Tensor v'1 Data.ByteString.ByteString -- ^ __bytes__ -> Tensor Build out_type -- ^ __output__ decodeRaw = decodeRaw' id decodeRaw' :: forall v'1 out_type . 
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] out_type) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __bytes__ -> Tensor Build out_type -- ^ __output__ decodeRaw' op'options bytes | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs bytes] return (opDef "DecodeRaw" & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "bytes" type: DT_STRING } output_arg { name: "output" type_attr: "out_type" } attr { name: "out_type" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT16 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_BOOL } } } attr { name: "little_endian" type: "bool" default_value { b: true } } -} -- | decodeWav :: Tensor v'1 Data.ByteString.ByteString -- ^ __contents__ -> (Tensor Build Float, Tensor Build Data.Int.Int32) -- ^ (__audio__, __sample_rate__) -- -- * __audio__ -- -- * __sample_rate__ decodeWav = decodeWav' id decodeWav' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __contents__ -> (Tensor Build Float, Tensor Build Data.Int.Int32) -- ^ (__audio__, __sample_rate__) -- -- * __audio__ -- -- * __sample_rate__ decodeWav' op'options contents | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs contents] return (opDef "DecodeWav" & op'options & opInputs .~ op'inputs) {- input_arg { name: "contents" type: DT_STRING } output_arg { name: "audio" type: DT_FLOAT } output_arg { name: "sample_rate" type: DT_INT32 } attr { name: "desired_channels" type: "int" default_value { i: -1 } } attr { name: "desired_samples" type: "int" default_value { i: -1 } } -} -- | deepCopy :: forall v'1 t m' . (MonadBuild m', TensorType t) => Tensor v'1 t -- ^ __x__ -> m' (Tensor Value t) -- ^ __y__ deepCopy = deepCopy' id deepCopy' :: forall v'1 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 t -- ^ __x__ -> m' (Tensor Value t) -- ^ __y__ deepCopy' op'options x | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] buildOp [] (opDef "DeepCopy" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" } -} -- | deleteIterator :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 Variant -- ^ __deleter__ -> m' (ControlNode) deleteIterator = deleteIterator' id deleteIterator' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 Variant -- ^ __deleter__ -> m' (ControlNode) deleteIterator' op'options handle deleter | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs deleter] buildOp [] (opDef "DeleteIterator" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_RESOURCE } input_arg { name: "deleter" type: DT_VARIANT } -} -- | deleteSessionTensor :: forall v'1 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> m' (ControlNode) deleteSessionTensor = deleteSessionTensor' id deleteSessionTensor' :: forall v'1 m' . 
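-- decodeWav returns the pair (audio, sample_rate): audio is Float in
-- [-1, 1] with shape [samples, channels], and the desired_* attrs default
-- to -1, meaning "as encoded".  Sketch with a hypothetical 'wavBytes':
--
-- > let (audio, sampleRate) = decodeWav'
-- >       (opAttr "desired_channels" .~ (1 :: Int64))
-- >       (constant (Shape []) [wavBytes])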
(MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> m' (ControlNode) deleteSessionTensor' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "DeleteSessionTensor" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING } -} -- | denseToDenseSetOperation :: forall v'1 v'2 t . (OneOf '[Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8] t) => Tensor v'1 t -- ^ __set1__ -> Tensor v'2 t -- ^ __set2__ -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__result_indices__, __result_values__, __result_shape__) -- -- * __result_indices__ -- -- * __result_values__ -- -- * __result_shape__ denseToDenseSetOperation = denseToDenseSetOperation' id denseToDenseSetOperation' :: forall v'1 v'2 t . (OneOf '[Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8] t) => OpParams -> Tensor v'1 t -- ^ __set1__ -> Tensor v'2 t -- ^ __set2__ -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__result_indices__, __result_values__, __result_shape__) -- -- * __result_indices__ -- -- * __result_values__ -- -- * __result_shape__ denseToDenseSetOperation' op'options set1 set2 | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs set1, buildInputs set2] return (opDef "DenseToDenseSetOperation" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "set1" type_attr: "T" } input_arg { name: "set2" type_attr: "T" } output_arg { name: "result_indices" type: DT_INT64 } output_arg { name: "result_values" type_attr: "T" } output_arg { name: "result_shape" type: DT_INT64 } attr { name: "set_operation" type: "string" } attr { name: "validate_indices" type: "bool" default_value { b: true } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_UINT16 type: DT_STRING } } } -} -- | denseToSparseSetOperation :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8] t) => Tensor v'1 t -- ^ __set1__ -> Tensor v'2 Data.Int.Int64 -- ^ __set2_indices__ -> Tensor v'3 t -- ^ __set2_values__ -> Tensor v'4 Data.Int.Int64 -- ^ __set2_shape__ -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__result_indices__, __result_values__, __result_shape__) -- -- * __result_indices__ -- -- * __result_values__ -- -- * __result_shape__ denseToSparseSetOperation = denseToSparseSetOperation' id denseToSparseSetOperation' :: forall v'1 v'2 v'3 v'4 t . 
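-- The "set_operation" attr has no default, so in practice
-- denseToDenseSetOperation must be called through the primed variant (the
-- plain wrapper would fail once the graph runs).  TensorFlow accepts
-- "a-b", "b-a", "intersection" and "union"; the result is a sparse tensor
-- as an (indices, values, shape) triple:
--
-- > let (idx, vals, shp) = denseToDenseSetOperation'
-- >       (opAttr "set_operation" .~ ("intersection" :: ByteString))
-- >       set1 set2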
(OneOf '[Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8] t) => OpParams -> Tensor v'1 t -- ^ __set1__ -> Tensor v'2 Data.Int.Int64 -- ^ __set2_indices__ -> Tensor v'3 t -- ^ __set2_values__ -> Tensor v'4 Data.Int.Int64 -- ^ __set2_shape__ -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__result_indices__, __result_values__, __result_shape__) -- -- * __result_indices__ -- -- * __result_values__ -- -- * __result_shape__ denseToSparseSetOperation' op'options set1 set2_indices set2_values set2_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs set1, buildInputs set2_indices, buildInputs set2_values, buildInputs set2_shape] return (opDef "DenseToSparseSetOperation" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "set1" type_attr: "T" } input_arg { name: "set2_indices" type: DT_INT64 } input_arg { name: "set2_values" type_attr: "T" } input_arg { name: "set2_shape" type: DT_INT64 } output_arg { name: "result_indices" type: DT_INT64 } output_arg { name: "result_values" type_attr: "T" } output_arg { name: "result_shape" type: DT_INT64 } attr { name: "set_operation" type: "string" } attr { name: "validate_indices" type: "bool" default_value { b: true } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_UINT16 type: DT_STRING } } } -} -- | depthToSpace :: forall v'1 t . (TensorType t) => Data.Int.Int64 -- ^ __block_size__ -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ depthToSpace = depthToSpace' id depthToSpace' :: forall v'1 t . (TensorType t) => OpParams -> Data.Int.Int64 -- ^ __block_size__ -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ depthToSpace' op'options block_size input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "DepthToSpace" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "block_size" .~ block_size & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "block_size" type: "int" has_minimum: true minimum: 2 } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" s: "NCHW_VECT_C" } } } -} -- | depthwiseConv2dNative :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __filter__ -> Tensor Build t -- ^ __output__ depthwiseConv2dNative = depthwiseConv2dNative' id depthwiseConv2dNative' :: forall v'1 v'2 t . 
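-- Required attrs that the generator lifts into positional arguments (here
-- "block_size") are passed directly.  E.g. block size 2 rearranges a
-- [1, 1, 1, 4] tensor into shape [1, 2, 2, 1]:
--
-- > y = depthToSpace 2 (constant (Shape [1, 1, 1, 4]) [1, 2, 3, 4 :: Float])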
(OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __filter__ -> Tensor Build t -- ^ __output__ depthwiseConv2dNative' op'options input filter | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter] return (opDef "DepthwiseConv2dNative" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "filter" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "strides" type: "list(int)" } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 } } } -} -- | depthwiseConv2dNativeBackpropFilter :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __filter_sizes__ -> Tensor v'3 t -- ^ __out_backprop__ -> Tensor Build t -- ^ __output__ depthwiseConv2dNativeBackpropFilter = depthwiseConv2dNativeBackpropFilter' id depthwiseConv2dNativeBackpropFilter' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __filter_sizes__ -> Tensor v'3 t -- ^ __out_backprop__ -> Tensor Build t -- ^ __output__ depthwiseConv2dNativeBackpropFilter' op'options input filter_sizes out_backprop | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter_sizes, buildInputs out_backprop] return (opDef "DepthwiseConv2dNativeBackpropFilter" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "filter_sizes" type: DT_INT32 } input_arg { name: "out_backprop" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "strides" type: "list(int)" } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 } } } -} -- | depthwiseConv2dNativeBackpropInput :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 Data.Int.Int32 -- ^ __input_sizes__ -> Tensor v'2 t -- ^ __filter__ -> Tensor v'3 t -- ^ __out_backprop__ -> Tensor Build t -- ^ __output__ depthwiseConv2dNativeBackpropInput = depthwiseConv2dNativeBackpropInput' id depthwiseConv2dNativeBackpropInput' :: forall v'1 v'2 v'3 t . 
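-- By contrast, the required "strides" and "padding" attrs of the
-- depthwiseConv2dNative family are not lifted into arguments and must come
-- in via OpParams, or graph construction fails at run time.  Sketch,
-- assuming an 'Attribute' instance for '[Int64]' (the list(int) attr
-- type):
--
-- > conv = depthwiseConv2dNative'
-- >          ( (opAttr "strides" .~ ([1, 1, 1, 1] :: [Int64]))
-- >          . (opAttr "padding" .~ ("SAME" :: ByteString)) )
-- >          input filter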
(OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __input_sizes__ -> Tensor v'2 t -- ^ __filter__ -> Tensor v'3 t -- ^ __out_backprop__ -> Tensor Build t -- ^ __output__ depthwiseConv2dNativeBackpropInput' op'options input_sizes filter out_backprop | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_sizes, buildInputs filter, buildInputs out_backprop] return (opDef "DepthwiseConv2dNativeBackpropInput" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_sizes" type: DT_INT32 } input_arg { name: "filter" type_attr: "T" } input_arg { name: "out_backprop" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "strides" type: "list(int)" } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 } } } -} -- | dequantize :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 Float -- ^ __min_range__ -> Tensor v'3 Float -- ^ __max_range__ -> Tensor Build Float -- ^ __output__ dequantize = dequantize' id dequantize' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 Float -- ^ __min_range__ -> Tensor v'3 Float -- ^ __max_range__ -> Tensor Build Float -- ^ __output__ dequantize' op'options input min_range max_range | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs min_range, buildInputs max_range] return (opDef "Dequantize" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "min_range" type: DT_FLOAT } input_arg { name: "max_range" type: DT_FLOAT } output_arg { name: "output" type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "mode" type: "string" default_value { s: "MIN_COMBINED" } allowed_values { list { s: "MIN_COMBINED" s: "MIN_FIRST" s: "SCALED" } } } -} -- | deserializeIterator :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __resource_handle__ -> Tensor v'2 Variant -- ^ __serialized__ -> m' (ControlNode) deserializeIterator = deserializeIterator' id deserializeIterator' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource_handle__ -> Tensor v'2 Variant -- ^ __serialized__ -> m' (ControlNode) deserializeIterator' op'options resource_handle serialized | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource_handle, buildInputs serialized] buildOp [] (opDef "DeserializeIterator" & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource_handle" type: DT_RESOURCE } input_arg { name: "serialized" type: DT_VARIANT } -} -- | deserializeManySparse :: forall v'1 dtype . 
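-- dequantize maps quantized values back to Float using the min_range and
-- max_range scalars; the "mode" attr defaults to MIN_COMBINED.  Sketch
-- with a hypothetical quantized input 'qInput':
--
-- > f = dequantize' (opAttr "mode" .~ ("SCALED" :: ByteString))
-- >                 qInput
-- >                 (constant (Shape []) [-1 :: Float])
-- >                 (constant (Shape []) [1 :: Float])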
(TensorType dtype) => Tensor v'1 Data.ByteString.ByteString -- ^ __serialized_sparse__ -> (Tensor Build Data.Int.Int64, Tensor Build dtype, Tensor Build Data.Int.Int64) -- ^ (__sparse_indices__, __sparse_values__, __sparse_shape__) -- -- * __sparse_indices__ -- -- * __sparse_values__ -- -- * __sparse_shape__ deserializeManySparse = deserializeManySparse' id deserializeManySparse' :: forall v'1 dtype . (TensorType dtype) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __serialized_sparse__ -> (Tensor Build Data.Int.Int64, Tensor Build dtype, Tensor Build Data.Int.Int64) -- ^ (__sparse_indices__, __sparse_values__, __sparse_shape__) -- -- * __sparse_indices__ -- -- * __sparse_values__ -- -- * __sparse_shape__ deserializeManySparse' op'options serialized_sparse | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs serialized_sparse] return (opDef "DeserializeManySparse" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "serialized_sparse" type: DT_STRING } output_arg { name: "sparse_indices" type: DT_INT64 } output_arg { name: "sparse_values" type_attr: "dtype" } output_arg { name: "sparse_shape" type: DT_INT64 } attr { name: "dtype" type: "type" } -} -- | deserializeSparse :: forall v'1 dtype tserialized . (TensorType dtype, OneOf '[Data.ByteString.ByteString, Variant] tserialized) => Tensor v'1 tserialized -- ^ __serialized_sparse__ -> (Tensor Build Data.Int.Int64, Tensor Build dtype, Tensor Build Data.Int.Int64) -- ^ (__sparse_indices__, __sparse_values__, __sparse_shape__) -- -- * __sparse_indices__ -- -- * __sparse_values__ -- -- * __sparse_shape__ deserializeSparse = deserializeSparse' id deserializeSparse' :: forall v'1 dtype tserialized . (TensorType dtype, OneOf '[Data.ByteString.ByteString, Variant] tserialized) => OpParams -> Tensor v'1 tserialized -- ^ __serialized_sparse__ -> (Tensor Build Data.Int.Int64, Tensor Build dtype, Tensor Build Data.Int.Int64) -- ^ (__sparse_indices__, __sparse_values__, __sparse_shape__) -- -- * __sparse_indices__ -- -- * __sparse_values__ -- -- * __sparse_shape__ deserializeSparse' op'options serialized_sparse | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs serialized_sparse] return (opDef "DeserializeSparse" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "Tserialized" .~ tensorType (undefined :: tserialized) & op'options & opInputs .~ op'inputs) {- input_arg { name: "serialized_sparse" type_attr: "Tserialized" } output_arg { name: "sparse_indices" type: DT_INT64 } output_arg { name: "sparse_values" type_attr: "dtype" } output_arg { name: "sparse_shape" type: DT_INT64 } attr { name: "dtype" type: "type" } attr { name: "Tserialized" type: "type" default_value { type: DT_STRING } allowed_values { list { type: DT_STRING type: DT_VARIANT } } } -} -- | destroyResourceOp :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __resource__ -> m' (ControlNode) destroyResourceOp = destroyResourceOp' id destroyResourceOp' :: forall v'1 m' . 
(MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> m' (ControlNode) destroyResourceOp' op'options resource | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource] buildOp [] (opDef "DestroyResourceOp" & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } attr { name: "ignore_lookup_error" type: "bool" default_value { b: true } } -} -- | destroyTemporaryVariable :: forall t m' . (MonadBuild m', TensorType t) => Tensor Ref t -- ^ __ref__ -> m' (Tensor Value t) -- ^ __value__ destroyTemporaryVariable = destroyTemporaryVariable' id destroyTemporaryVariable' :: forall t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -- ^ __ref__ -> m' (Tensor Value t) -- ^ __value__ destroyTemporaryVariable' op'options ref | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref] buildOp [] (opDef "DestroyTemporaryVariable" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" type_attr: "T" is_ref: true } output_arg { name: "value" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "var_name" type: "string" } -} -- | diag :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __diagonal__ -> Tensor Build t -- ^ __output__ diag = diag' id diag' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __diagonal__ -> Tensor Build t -- ^ __output__ diag' op'options diagonal | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs diagonal] return (opDef "Diag" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "diagonal" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | diagPart :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __diagonal__ diagPart = diagPart' id diagPart' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __diagonal__ diagPart' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "DiagPart" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "diagonal" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | digamma :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ digamma = digamma' id digamma' :: forall v'1 t . 
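-- diag embeds the input as the diagonal of a tensor of twice the rank, and
-- diagPart recovers it.  For a vector this is the ordinary diagonal
-- matrix:
--
-- > m = diag (constant (Shape [3]) [1, 2, 3 :: Float])
-- > -- m evaluates to [[1,0,0],[0,2,0],[0,0,3]]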
(OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ digamma' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Digamma" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | dilation2D :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __filter__ -> Tensor Build t -- ^ __output__ dilation2D = dilation2D' id dilation2D' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __filter__ -> Tensor Build t -- ^ __output__ dilation2D' op'options input filter | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter] return (opDef "Dilation2D" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "filter" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "strides" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "rates" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } -} -- | dilation2DBackpropFilter :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __filter__ -> Tensor v'3 t -- ^ __out_backprop__ -> Tensor Build t -- ^ __filter_backprop__ dilation2DBackpropFilter = dilation2DBackpropFilter' id dilation2DBackpropFilter' :: forall v'1 v'2 v'3 t . 
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __filter__ -> Tensor v'3 t -- ^ __out_backprop__ -> Tensor Build t -- ^ __filter_backprop__ dilation2DBackpropFilter' op'options input filter out_backprop | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs out_backprop] return (opDef "Dilation2DBackpropFilter" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "filter" type_attr: "T" } input_arg { name: "out_backprop" type_attr: "T" } output_arg { name: "filter_backprop" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "strides" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "rates" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } -} -- | dilation2DBackpropInput :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __filter__ -> Tensor v'3 t -- ^ __out_backprop__ -> Tensor Build t -- ^ __in_backprop__ dilation2DBackpropInput = dilation2DBackpropInput' id dilation2DBackpropInput' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __filter__ -> Tensor v'3 t -- ^ __out_backprop__ -> Tensor Build t -- ^ __in_backprop__ dilation2DBackpropInput' op'options input filter out_backprop | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs out_backprop] return (opDef "Dilation2DBackpropInput" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "filter" type_attr: "T" } input_arg { name: "out_backprop" type_attr: "T" } output_arg { name: "in_backprop" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "strides" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "rates" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } -} -- | div :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ div = div' id div' :: forall v'1 v'2 t . 
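-- Like the convolutions above, the dilation2D ops take their required
-- "strides", "rates" (input dilation rates) and "padding" attrs through
-- OpParams, e.g. (again assuming an 'Attribute' instance for '[Int64]'):
--
-- >   (opAttr "strides" .~ ([1, 1, 1, 1] :: [Int64]))
-- > . (opAttr "rates"   .~ ([1, 1, 1, 1] :: [Int64]))
-- > . (opAttr "padding" .~ ("VALID" :: ByteString))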
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ div' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "Div" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_UINT16 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | divNoNan :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ divNoNan = divNoNan' id divNoNan' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ divNoNan' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "DivNoNan" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | drawBoundingBoxes :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Float] t) => Tensor v'1 t -- ^ __images__ -> Tensor v'2 Float -- ^ __boxes__ -> Tensor Build t -- ^ __output__ drawBoundingBoxes = drawBoundingBoxes' id drawBoundingBoxes' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Float] t) => OpParams -> Tensor v'1 t -- ^ __images__ -> Tensor v'2 Float -- ^ __boxes__ -> Tensor Build t -- ^ __output__ drawBoundingBoxes' op'options images boxes | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images, buildInputs boxes] return (opDef "DrawBoundingBoxes" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "images" type_attr: "T" } input_arg { name: "boxes" type: DT_FLOAT } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_HALF } } } -} -- | drawBoundingBoxesV2 :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Float] t) => Tensor v'1 t -- ^ __images__ -> Tensor v'2 Float -- ^ __boxes__ -> Tensor v'3 Float -- ^ __colors__ -> Tensor Build t -- ^ __output__ drawBoundingBoxesV2 = drawBoundingBoxesV2' id drawBoundingBoxesV2' :: forall v'1 v'2 v'3 t . 
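-- divNoNan behaves like div except that x / 0 yields 0 rather than
-- Inf/NaN, which makes masked averages safe:
--
-- > z = divNoNan (constant (Shape [2]) [1, 1 :: Float])
-- >              (constant (Shape [2]) [2, 0])
-- > -- z evaluates to [0.5, 0.0]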
(OneOf '[Data.Word.Word16, Float] t) => OpParams -> Tensor v'1 t -- ^ __images__ -> Tensor v'2 Float -- ^ __boxes__ -> Tensor v'3 Float -- ^ __colors__ -> Tensor Build t -- ^ __output__ drawBoundingBoxesV2' op'options images boxes colors | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images, buildInputs boxes, buildInputs colors] return (opDef "DrawBoundingBoxesV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "images" type_attr: "T" } input_arg { name: "boxes" type: DT_FLOAT } input_arg { name: "colors" type: DT_FLOAT } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_HALF } } } -} -- | dynamicPartition :: forall v'1 v'2 t . (TensorType t) => Data.Int.Int64 -- ^ __num_partitions__ -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 Data.Int.Int32 -- ^ __partitions__ -> [Tensor Build t] -- ^ __outputs__ dynamicPartition = dynamicPartition' id dynamicPartition' :: forall v'1 v'2 t . (TensorType t) => OpParams -> Data.Int.Int64 -- ^ __num_partitions__ -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 Data.Int.Int32 -- ^ __partitions__ -> [Tensor Build t] -- ^ __outputs__ dynamicPartition' op'options num_partitions data' partitions | eqLengthGuard [] = pureOp [num_partitions] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs partitions] return (opDef "DynamicPartition" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "num_partitions" .~ num_partitions & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "partitions" type: DT_INT32 } output_arg { name: "outputs" type_attr: "T" number_attr: "num_partitions" } attr { name: "num_partitions" type: "int" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } -} -- | dynamicStitch :: forall v'1 v'2 t . (TensorType t) => [Tensor v'1 Data.Int.Int32] -- ^ __indices__ -> [Tensor v'2 t] -- ^ __data__ -> Tensor Build t -- ^ __merged__ dynamicStitch = dynamicStitch' id dynamicStitch' :: forall v'1 v'2 t . (TensorType t) => OpParams -> [Tensor v'1 Data.Int.Int32] -- ^ __indices__ -> [Tensor v'2 t] -- ^ __data__ -> Tensor Build t -- ^ __merged__ dynamicStitch' op'options indices data' | eqLengthGuard [("N", [("indices", length indices), ("data", length data')])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices, buildInputs data'] return (opDef "DynamicStitch" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length indices) :: Int64 {- input_arg { name: "indices" type: DT_INT32 number_attr: "N" } input_arg { name: "data" type_attr: "T" number_attr: "N" } output_arg { name: "merged" type_attr: "T" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } -} -- | eagerPyFunc :: forall v'1 tin tout m' . (MonadBuild m', TensorTypes tin, TensorTypes tout) => TensorList (v'1) tin -- ^ __input__ -> m' (TensorList (Value) tout) -- ^ __output__ eagerPyFunc = eagerPyFunc' id eagerPyFunc' :: forall v'1 tin tout m' . 
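-- dynamicPartition's output arity must be known while the graph is built,
-- which is why "num_partitions" is a plain Int64 argument and the result a
-- Haskell list of that length; dynamicStitch is its inverse.  Sketch with
-- a hypothetical 'xs' and a matching Int32 'partitions' tensor:
--
-- > let parts = dynamicPartition 2 xs partitions  -- length parts == 2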
(MonadBuild m', TensorTypes tin, TensorTypes tout) => OpParams -> TensorList (v'1) tin -- ^ __input__ -> m' (TensorList (Value) tout) -- ^ __output__ eagerPyFunc' op'options input | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "EagerPyFunc" & opAttr "Tin" .~ fromTensorTypes (Proxy :: Proxy tin) & opAttr "Tout" .~ fromTensorTypes (Proxy :: Proxy tout) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_list_attr: "Tin" } output_arg { name: "output" type_list_attr: "Tout" } attr { name: "token" type: "string" } attr { name: "Tin" type: "list(type)" has_minimum: true } attr { name: "Tout" type: "list(type)" has_minimum: true } -} -- | editDistance :: forall v'1 v'2 v'3 v'4 v'5 v'6 t . (TensorType t) => Tensor v'1 Data.Int.Int64 -- ^ __hypothesis_indices__ -> Tensor v'2 t -- ^ __hypothesis_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __hypothesis_shape__ -> Tensor v'4 Data.Int.Int64 -- ^ __truth_indices__ -> Tensor v'5 t -- ^ __truth_values__ -> Tensor v'6 Data.Int.Int64 -- ^ __truth_shape__ -> Tensor Build Float -- ^ __output__ editDistance = editDistance' id editDistance' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t . (TensorType t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __hypothesis_indices__ -> Tensor v'2 t -- ^ __hypothesis_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __hypothesis_shape__ -> Tensor v'4 Data.Int.Int64 -- ^ __truth_indices__ -> Tensor v'5 t -- ^ __truth_values__ -> Tensor v'6 Data.Int.Int64 -- ^ __truth_shape__ -> Tensor Build Float -- ^ __output__ editDistance' op'options hypothesis_indices hypothesis_values hypothesis_shape truth_indices truth_values truth_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs hypothesis_indices, buildInputs hypothesis_values, buildInputs hypothesis_shape, buildInputs truth_indices, buildInputs truth_values, buildInputs truth_shape] return (opDef "EditDistance" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "hypothesis_indices" type: DT_INT64 } input_arg { name: "hypothesis_values" type_attr: "T" } input_arg { name: "hypothesis_shape" type: DT_INT64 } input_arg { name: "truth_indices" type: DT_INT64 } input_arg { name: "truth_values" type_attr: "T" } input_arg { name: "truth_shape" type: DT_INT64 } output_arg { name: "output" type: DT_FLOAT } attr { name: "normalize" type: "bool" default_value { b: true } } attr { name: "T" type: "type" } -} -- | elu :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __features__ -> Tensor Build t -- ^ __activations__ elu = elu' id elu' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __features__ -> Tensor Build t -- ^ __activations__ elu' op'options features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features] return (opDef "Elu" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "features" type_attr: "T" } output_arg { name: "activations" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | eluGrad :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __gradients__ -> Tensor v'2 t -- ^ __outputs__ -> Tensor Build t -- ^ __backprops__ eluGrad = eluGrad' id eluGrad' :: forall v'1 v'2 t . 
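-- elu computes x for x > 0 and exp x - 1 otherwise:
--
-- > a = elu (constant (Shape [2]) [-1, 1 :: Float])
-- > -- a ~ [exp (-1) - 1, 1.0] ~ [-0.632, 1.0]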
(OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __gradients__ -> Tensor v'2 t -- ^ __outputs__ -> Tensor Build t -- ^ __backprops__ eluGrad' op'options gradients outputs | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients, buildInputs outputs] return (opDef "EluGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "gradients" type_attr: "T" } input_arg { name: "outputs" type_attr: "T" } output_arg { name: "backprops" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | empty :: forall v'1 dtype m' . (MonadBuild m', TensorType dtype) => Tensor v'1 Data.Int.Int32 -- ^ __shape__ -> m' (Tensor Value dtype) -- ^ __output__ empty = empty' id empty' :: forall v'1 dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __shape__ -> m' (Tensor Value dtype) -- ^ __output__ empty' op'options shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape] buildOp [] (opDef "Empty" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "shape" type: DT_INT32 } output_arg { name: "output" type_attr: "dtype" } attr { name: "dtype" type: "type" } attr { name: "init" type: "bool" default_value { b: false } } -} -- | emptyTensorList :: forall v'1 v'2 shape_type . (OneOf '[Data.Int.Int32, Data.Int.Int64] shape_type) => DataType -- ^ __element_dtype__ -> Tensor v'1 shape_type -- ^ __element_shape__ -> Tensor v'2 Data.Int.Int32 -- ^ __max_num_elements__ -> Tensor Build Variant -- ^ __handle__ emptyTensorList = emptyTensorList' id emptyTensorList' :: forall v'1 v'2 shape_type . 
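{- Usage sketch (added commentary): unlike the pure ops above, `empty` is
   built with `buildOp` under MonadBuild, since it allocates a buffer that
   is uninitialized by default.  Setting the "init" attribute through
   OpParams asks for a zero-filled result instead; the element type is
   fixed by the annotation at the use site.  Imports as in the sketches
   above.

   zeros2x3 :: IO (V.Vector Float)
   zeros2x3 = TF.runSession $ do
       t <- Ops.empty' (TF.opAttr "init" .~ True)
                       (TF.vector [2, 3 :: Int32])
       TF.run (t :: TF.Tensor TF.Value Float)
-}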
(OneOf '[Data.Int.Int32, Data.Int.Int64] shape_type) => OpParams -> DataType -- ^ __element_dtype__ -> Tensor v'1 shape_type -- ^ __element_shape__ -> Tensor v'2 Data.Int.Int32 -- ^ __max_num_elements__ -> Tensor Build Variant -- ^ __handle__ emptyTensorList' op'options element_dtype element_shape max_num_elements | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs element_shape, buildInputs max_num_elements] return (opDef "EmptyTensorList" & opAttr "shape_type" .~ tensorType (undefined :: shape_type) & opAttr "element_dtype" .~ element_dtype & op'options & opInputs .~ op'inputs) {- input_arg { name: "element_shape" type_attr: "shape_type" } input_arg { name: "max_num_elements" type: DT_INT32 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "element_dtype" type: "type" } attr { name: "shape_type" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | encodeBase64 :: Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ encodeBase64 = encodeBase64' id encodeBase64' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ encodeBase64' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "EncodeBase64" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_STRING } output_arg { name: "output" type: DT_STRING } attr { name: "pad" type: "bool" default_value { b: false } } -} -- | encodeJpeg :: Tensor v'1 Data.Word.Word8 -- ^ __image__ -> Tensor Build Data.ByteString.ByteString -- ^ __contents__ encodeJpeg = encodeJpeg' id encodeJpeg' :: OpParams -> Tensor v'1 Data.Word.Word8 -- ^ __image__ -> Tensor Build Data.ByteString.ByteString -- ^ __contents__ encodeJpeg' op'options image | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs image] return (opDef "EncodeJpeg" & op'options & opInputs .~ op'inputs) {- input_arg { name: "image" type: DT_UINT8 } output_arg { name: "contents" type: DT_STRING } attr { name: "format" type: "string" default_value { s: "" } allowed_values { list { s: "" s: "grayscale" s: "rgb" } } } attr { name: "quality" type: "int" default_value { i: 95 } } attr { name: "progressive" type: "bool" default_value { b: false } } attr { name: "optimize_size" type: "bool" default_value { b: false } } attr { name: "chroma_downsampling" type: "bool" default_value { b: true } } attr { name: "density_unit" type: "string" default_value { s: "in" } allowed_values { list { s: "in" s: "cm" } } } attr { name: "x_density" type: "int" default_value { i: 300 } } attr { name: "y_density" type: "int" default_value { i: 300 } } attr { name: "xmp_metadata" type: "string" default_value { s: "" } } -} -- | encodeJpegVariableQuality :: Tensor v'1 Data.Word.Word8 -- ^ __images__ -> Tensor v'2 Data.Int.Int32 -- ^ __quality__ -> Tensor Build Data.ByteString.ByteString -- ^ __contents__ encodeJpegVariableQuality = encodeJpegVariableQuality' id encodeJpegVariableQuality' :: OpParams -> Tensor v'1 Data.Word.Word8 -- ^ __images__ -> Tensor v'2 Data.Int.Int32 -- ^ __quality__ -> Tensor Build Data.ByteString.ByteString -- ^ __contents__ encodeJpegVariableQuality' op'options images quality | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images, buildInputs quality] return (opDef 
"EncodeJpegVariableQuality" & op'options & opInputs .~ op'inputs) {- input_arg { name: "images" type: DT_UINT8 } input_arg { name: "quality" type: DT_INT32 } output_arg { name: "contents" type: DT_STRING } -} -- | encodePng :: forall v'1 t . (OneOf '[Data.Word.Word16, Data.Word.Word8] t) => Tensor v'1 t -- ^ __image__ -> Tensor Build Data.ByteString.ByteString -- ^ __contents__ encodePng = encodePng' id encodePng' :: forall v'1 t . (OneOf '[Data.Word.Word16, Data.Word.Word8] t) => OpParams -> Tensor v'1 t -- ^ __image__ -> Tensor Build Data.ByteString.ByteString -- ^ __contents__ encodePng' op'options image | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs image] return (opDef "EncodePng" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "image" type_attr: "T" } output_arg { name: "contents" type: DT_STRING } attr { name: "compression" type: "int" default_value { i: -1 } } attr { name: "T" type: "type" default_value { type: DT_UINT8 } allowed_values { list { type: DT_UINT8 type: DT_UINT16 } } } -} -- | encodeProto :: forall v'1 v'2 tinput_types . (TensorTypes tinput_types) => Tensor v'1 Data.Int.Int32 -- ^ __sizes__ -> TensorList (v'2) tinput_types -- ^ __values__ -> Tensor Build Data.ByteString.ByteString -- ^ __bytes__ encodeProto = encodeProto' id encodeProto' :: forall v'1 v'2 tinput_types . (TensorTypes tinput_types) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __sizes__ -> TensorList (v'2) tinput_types -- ^ __values__ -> Tensor Build Data.ByteString.ByteString -- ^ __bytes__ encodeProto' op'options sizes values | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sizes, buildInputs values] return (opDef "EncodeProto" & opAttr "Tinput_types" .~ fromTensorTypes (Proxy :: Proxy tinput_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "sizes" type: DT_INT32 } input_arg { name: "values" type_list_attr: "Tinput_types" } output_arg { name: "bytes" type: DT_STRING } attr { name: "field_names" type: "list(string)" } attr { name: "message_type" type: "string" } attr { name: "descriptor_source" type: "string" default_value { s: "local://" } } attr { name: "Tinput_types" type: "list(type)" has_minimum: true minimum: 1 } -} -- | encodeWav :: Tensor v'1 Float -- ^ __audio__ -> Tensor v'2 Data.Int.Int32 -- ^ __sample_rate__ -> Tensor Build Data.ByteString.ByteString -- ^ __contents__ encodeWav = encodeWav' id encodeWav' :: OpParams -> Tensor v'1 Float -- ^ __audio__ -> Tensor v'2 Data.Int.Int32 -- ^ __sample_rate__ -> Tensor Build Data.ByteString.ByteString -- ^ __contents__ encodeWav' op'options audio sample_rate | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs audio, buildInputs sample_rate] return (opDef "EncodeWav" & op'options & opInputs .~ op'inputs) {- input_arg { name: "audio" type: DT_FLOAT } input_arg { name: "sample_rate" type: DT_INT32 } output_arg { name: "contents" type: DT_STRING } -} -- | enqueueTPUEmbeddingIntegerBatch :: forall v'1 v'2 m' . (MonadBuild m') => [Tensor v'1 Data.Int.Int32] -- ^ __batch__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __mode_override__ -> m' (ControlNode) enqueueTPUEmbeddingIntegerBatch = enqueueTPUEmbeddingIntegerBatch' id enqueueTPUEmbeddingIntegerBatch' :: forall v'1 v'2 m' . 
(MonadBuild m') => OpParams -> [Tensor v'1 Data.Int.Int32] -- ^ __batch__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __mode_override__ -> m' (ControlNode) enqueueTPUEmbeddingIntegerBatch' op'options batch mode_override | eqLengthGuard [("N", [("batch", length batch)])] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs batch, buildInputs mode_override] buildOp [] (opDef "EnqueueTPUEmbeddingIntegerBatch" & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length batch) :: Int64 {- input_arg { name: "batch" type: DT_INT32 number_attr: "N" } input_arg { name: "mode_override" type: DT_STRING } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "device_ordinal" type: "int" default_value { i: -1 } } -} -- | enqueueTPUEmbeddingSparseBatch :: forall v'1 v'2 v'3 v'4 t1 t2 t3 m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] t1, OneOf '[Data.Int.Int32, Data.Int.Int64] t2, OneOf '[Double, Float] t3) => [Tensor v'1 t1] -- ^ __sample_indices__ -> [Tensor v'2 t2] -- ^ __embedding_indices__ -> [Tensor v'3 t3] -- ^ __aggregation_weights__ -> Tensor v'4 Data.ByteString.ByteString -- ^ __mode_override__ -> m' (ControlNode) enqueueTPUEmbeddingSparseBatch = enqueueTPUEmbeddingSparseBatch' id enqueueTPUEmbeddingSparseBatch' :: forall v'1 v'2 v'3 v'4 t1 t2 t3 m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] t1, OneOf '[Data.Int.Int32, Data.Int.Int64] t2, OneOf '[Double, Float] t3) => OpParams -> [Tensor v'1 t1] -- ^ __sample_indices__ -> [Tensor v'2 t2] -- ^ __embedding_indices__ -> [Tensor v'3 t3] -- ^ __aggregation_weights__ -> Tensor v'4 Data.ByteString.ByteString -- ^ __mode_override__ -> m' (ControlNode) enqueueTPUEmbeddingSparseBatch' op'options sample_indices embedding_indices aggregation_weights mode_override | eqLengthGuard [("N", [("sample_indices", length sample_indices), ("embedding_indices", length embedding_indices), ("aggregation_weights", length aggregation_weights)])] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sample_indices, buildInputs embedding_indices, buildInputs aggregation_weights, buildInputs mode_override] buildOp [] (opDef "EnqueueTPUEmbeddingSparseBatch" & opAttr "T1" .~ tensorType (undefined :: t1) & opAttr "T2" .~ tensorType (undefined :: t2) & opAttr "T3" .~ tensorType (undefined :: t3) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length sample_indices) :: Int64 {- input_arg { name: "sample_indices" type_attr: "T1" number_attr: "N" } input_arg { name: "embedding_indices" type_attr: "T2" number_attr: "N" } input_arg { name: "aggregation_weights" type_attr: "T3" number_attr: "N" } input_arg { name: "mode_override" type: DT_STRING } attr { name: "T1" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "T2" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "T3" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "device_ordinal" type: "int" default_value { i: -1 } } attr { name: "combiners" type: "list(string)" default_value { list { } } } -} -- | enqueueTPUEmbeddingSparseTensorBatch :: forall v'1 v'2 v'3 v'4 t1 t2 t3 m' . 
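{- Structural sketch (added commentary): the guard above ties three list
   arguments to the single "N" attribute, so `sample_indices`,
   `embedding_indices` and `aggregation_weights` must all have the same
   length (one entry per embedding table).  The tensor names below are
   hypothetical, OverloadedStrings is assumed for the mode string, and the
   op is only meaningful on a TPU system:

   Ops.enqueueTPUEmbeddingSparseBatch
       [sampleIdx0, sampleIdx1]   -- N = 2
       [embedIdx0,  embedIdx1]    -- must also have length 2
       [weights0,   weights1]     -- must also have length 2
       (TF.scalar "unspecified")  -- mode_override
-}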
(MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] t1, OneOf '[Data.Int.Int32, Data.Int.Int64] t2, OneOf '[Double, Float] t3) => [Tensor v'1 t1] -- ^ __sample_indices__ -> [Tensor v'2 t2] -- ^ __embedding_indices__ -> [Tensor v'3 t3] -- ^ __aggregation_weights__ -> Tensor v'4 Data.ByteString.ByteString -- ^ __mode_override__ -> m' (ControlNode) enqueueTPUEmbeddingSparseTensorBatch = enqueueTPUEmbeddingSparseTensorBatch' id enqueueTPUEmbeddingSparseTensorBatch' :: forall v'1 v'2 v'3 v'4 t1 t2 t3 m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] t1, OneOf '[Data.Int.Int32, Data.Int.Int64] t2, OneOf '[Double, Float] t3) => OpParams -> [Tensor v'1 t1] -- ^ __sample_indices__ -> [Tensor v'2 t2] -- ^ __embedding_indices__ -> [Tensor v'3 t3] -- ^ __aggregation_weights__ -> Tensor v'4 Data.ByteString.ByteString -- ^ __mode_override__ -> m' (ControlNode) enqueueTPUEmbeddingSparseTensorBatch' op'options sample_indices embedding_indices aggregation_weights mode_override | eqLengthGuard [("N", [("sample_indices", length sample_indices), ("embedding_indices", length embedding_indices), ("aggregation_weights", length aggregation_weights)])] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sample_indices, buildInputs embedding_indices, buildInputs aggregation_weights, buildInputs mode_override] buildOp [] (opDef "EnqueueTPUEmbeddingSparseTensorBatch" & opAttr "T1" .~ tensorType (undefined :: t1) & opAttr "T2" .~ tensorType (undefined :: t2) & opAttr "T3" .~ tensorType (undefined :: t3) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length sample_indices) :: Int64 {- input_arg { name: "sample_indices" type_attr: "T1" number_attr: "N" } input_arg { name: "embedding_indices" type_attr: "T2" number_attr: "N" } input_arg { name: "aggregation_weights" type_attr: "T3" number_attr: "N" } input_arg { name: "mode_override" type: DT_STRING } attr { name: "T1" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "T2" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "T3" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "device_ordinal" type: "int" default_value { i: -1 } } attr { name: "combiners" type: "list(string)" default_value { list { } } } attr { name: "table_ids" type: "list(int)" } attr { name: "max_sequence_lengths" type: "list(int)" default_value { list { } } } -} -- | ensureShape :: forall v'1 t . (TensorType t) => Shape -- ^ __shape__ -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ ensureShape = ensureShape' id ensureShape' :: forall v'1 t . (TensorType t) => OpParams -> Shape -- ^ __shape__ -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ ensureShape' op'options shape input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "EnsureShape" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "shape" type: "shape" } attr { name: "T" type: "type" } -} -- | enter :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __data__ -> Tensor Build t -- ^ __output__ enter = enter' id enter' :: forall v'1 t . 
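{- Usage sketch (added commentary): `ensureShape` is an identity op that
   fails at run time when its input does not match the static `shape`
   argument (passed as the "shape" attribute above).

   checked :: IO (V.Vector Float)
   checked = TF.runSession $
       TF.run (Ops.ensureShape (TF.Shape [3]) (TF.vector [1, 2, 3 :: Float]))
       -- succeeds; TF.Shape [4] here would make the run fail instead
-}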
(TensorType t) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor Build t -- ^ __output__ enter' op'options data' | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data'] return (opDef "Enter" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "frame_name" type: "string" } attr { name: "is_constant" type: "bool" default_value { b: false } } attr { name: "parallel_iterations" type: "int" default_value { i: 10 } } -} -- | equal :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ equal = equal' id equal' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ equal' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "Equal" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_QUINT8 type: DT_QINT8 type: DT_QINT32 type: DT_STRING type: DT_BOOL type: DT_COMPLEX128 } } } -} -- | erf :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ erf = erf' id erf' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ erf' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Erf" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | erfc :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ erfc = erfc' id erfc' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ erfc' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Erfc" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | euclideanNorm :: forall v'1 v'2 t tidx . 
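{- Usage sketch (added commentary): `equal` compares element-wise with the
   usual broadcasting rules and yields a Bool tensor.

   whereTwo :: IO (V.Vector Bool)
   whereTwo = TF.runSession $
       TF.run (Ops.equal (TF.vector [1, 2, 3 :: Int32]) (TF.scalar 2))
       -- V.fromList [False, True, False]
-}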
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 tidx -- ^ __reduction_indices__ -> Tensor Build t -- ^ __output__ euclideanNorm = euclideanNorm' id euclideanNorm' :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tidx -- ^ __reduction_indices__ -> Tensor Build t -- ^ __output__ euclideanNorm' op'options input reduction_indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs reduction_indices] return (opDef "EuclideanNorm" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "reduction_indices" type_attr: "Tidx" } output_arg { name: "output" type_attr: "T" } attr { name: "keep_dims" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | exit :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __data__ -> Tensor Build t -- ^ __output__ exit = exit' id exit' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor Build t -- ^ __output__ exit' op'options data' | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data'] return (opDef "Exit" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } -} -- | exp :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ exp = exp' id exp' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ exp' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Exp" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | expandDims :: forall v'1 v'2 t tdim . 
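{- Usage sketch (added commentary): `euclideanNorm` reduces over the axes
   named in `reduction_indices`; the "keep_dims" attribute (default false
   in the op definition above) travels through OpParams.

   frobeniusNorm :: IO (V.Vector Float)
   frobeniusNorm = TF.runSession $
       TF.run $ Ops.euclideanNorm'
           (TF.opAttr "keep_dims" .~ True)
           (TF.constant (TF.Shape [2, 2]) [1, 1, 1, 1 :: Float])
           (TF.vector [0, 1 :: Int32])
       -- sqrt (1 + 1 + 1 + 1) = 2.0, returned with shape [1, 1]
-}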
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tdim) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 tdim -- ^ __dim__ -> Tensor Build t -- ^ __output__ expandDims = expandDims' id expandDims' :: forall v'1 v'2 t tdim . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tdim) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tdim -- ^ __dim__ -> Tensor Build t -- ^ __output__ expandDims' op'options input dim | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs dim] return (opDef "ExpandDims" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tdim" .~ tensorType (undefined :: tdim) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "dim" type_attr: "Tdim" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tdim" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | experimentalAssertNextDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __transformations__ -> Tensor Build Variant -- ^ __handle__ experimentalAssertNextDataset = experimentalAssertNextDataset' id experimentalAssertNextDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __transformations__ -> Tensor Build Variant -- ^ __handle__ experimentalAssertNextDataset' op'options output_types input_dataset transformations | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs transformations] return (opDef "ExperimentalAssertNextDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "transformations" type: DT_STRING } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | experimentalAutoShardDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __num_workers__ -> Tensor v'3 Data.Int.Int64 -- ^ __index__ -> Tensor Build Variant -- ^ __handle__ experimentalAutoShardDataset = experimentalAutoShardDataset' id experimentalAutoShardDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __num_workers__ -> Tensor v'3 Data.Int.Int64 -- ^ __index__ -> Tensor Build Variant -- ^ __handle__ experimentalAutoShardDataset' op'options output_types input_dataset num_workers index | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs num_workers, buildInputs index] return (opDef "ExperimentalAutoShardDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "num_workers" type: DT_INT64 } input_arg { name: "index" type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | experimentalBytesProducedStatsDataset :: [DataType] -- ^ 
__output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __tag__ -> Tensor Build Variant -- ^ __handle__ experimentalBytesProducedStatsDataset = experimentalBytesProducedStatsDataset' id experimentalBytesProducedStatsDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __tag__ -> Tensor Build Variant -- ^ __handle__ experimentalBytesProducedStatsDataset' op'options output_types input_dataset tag | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs tag] return (opDef "ExperimentalBytesProducedStatsDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "tag" type: DT_STRING } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | experimentalCSVDataset :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 output_types m' . (MonadBuild m', OneOfs '[Data.ByteString.ByteString, Data.Int.Int32, Data.Int.Int64, Double, Float] output_types) => Tensor v'1 Data.ByteString.ByteString -- ^ __filenames__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __compression_type__ -> Tensor v'3 Data.Int.Int64 -- ^ __buffer_size__ -> Tensor v'4 Bool -- ^ __header__ -> Tensor v'5 Data.ByteString.ByteString -- ^ __field_delim__ -> Tensor v'6 Bool -- ^ __use_quote_delim__ -> Tensor v'7 Data.ByteString.ByteString -- ^ __na_value__ -> Tensor v'8 Data.Int.Int64 -- ^ __select_cols__ -> TensorList (v'9) output_types -- ^ __record_defaults__ -> m' (Tensor Value Variant) -- ^ __handle__ experimentalCSVDataset = experimentalCSVDataset' id experimentalCSVDataset' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 output_types m' . 
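{- Note (added commentary) on the Experimental*Dataset bindings in this
   region: each op also requires an "output_shapes" attribute (list(shape),
   minimum 1) that has no positional argument in these bindings, so it must
   be supplied through OpParams next to the positional `output_types` list.
   A sketch, where `inputDataset` is a hypothetical Variant dataset tensor,
   DT_STRING comes with the DataType re-export from TensorFlow.Types, and
   an Attribute instance for [Shape] is assumed to be available:

   tagged = Ops.experimentalBytesProducedStatsDataset'
       (TF.opAttr "output_shapes" .~ ([TF.Shape []] :: [TF.Shape]))
       [DT_STRING]
       inputDataset
       (TF.scalar "bytes_tag")   -- OverloadedStrings
-}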
(MonadBuild m', OneOfs '[Data.ByteString.ByteString, Data.Int.Int32, Data.Int.Int64, Double, Float] output_types) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __filenames__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __compression_type__ -> Tensor v'3 Data.Int.Int64 -- ^ __buffer_size__ -> Tensor v'4 Bool -- ^ __header__ -> Tensor v'5 Data.ByteString.ByteString -- ^ __field_delim__ -> Tensor v'6 Bool -- ^ __use_quote_delim__ -> Tensor v'7 Data.ByteString.ByteString -- ^ __na_value__ -> Tensor v'8 Data.Int.Int64 -- ^ __select_cols__ -> TensorList (v'9) output_types -- ^ __record_defaults__ -> m' (Tensor Value Variant) -- ^ __handle__ experimentalCSVDataset' op'options filenames compression_type buffer_size header field_delim use_quote_delim na_value select_cols record_defaults | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs filenames, buildInputs compression_type, buildInputs buffer_size, buildInputs header, buildInputs field_delim, buildInputs use_quote_delim, buildInputs na_value, buildInputs select_cols, buildInputs record_defaults] buildOp [] (opDef "ExperimentalCSVDataset" & opAttr "output_types" .~ fromTensorTypes (Proxy :: Proxy output_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "filenames" type: DT_STRING } input_arg { name: "compression_type" type: DT_STRING } input_arg { name: "buffer_size" type: DT_INT64 } input_arg { name: "header" type: DT_BOOL } input_arg { name: "field_delim" type: DT_STRING } input_arg { name: "use_quote_delim" type: DT_BOOL } input_arg { name: "na_value" type: DT_STRING } input_arg { name: "select_cols" type: DT_INT64 } input_arg { name: "record_defaults" type_list_attr: "output_types" } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_STRING } } } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | experimentalChooseFastestDataset :: Data.Int.Int64 -- ^ __num_experiments__ -> [DataType] -- ^ __output_types__ -> [Tensor v'1 Variant] -- ^ __input_datasets__ -> Tensor Build Variant -- ^ __handle__ experimentalChooseFastestDataset = experimentalChooseFastestDataset' id experimentalChooseFastestDataset' :: OpParams -> Data.Int.Int64 -- ^ __num_experiments__ -> [DataType] -- ^ __output_types__ -> [Tensor v'1 Variant] -- ^ __input_datasets__ -> Tensor Build Variant -- ^ __handle__ experimentalChooseFastestDataset' op'options num_experiments output_types input_datasets | eqLengthGuard [("N", [("input_datasets", length input_datasets)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_datasets] return (opDef "ExperimentalChooseFastestDataset" & opAttr "num_experiments" .~ num_experiments & opAttr "output_types" .~ output_types & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length input_datasets) :: Int64 {- input_arg { name: "input_datasets" type: DT_VARIANT number_attr: "N" } output_arg { name: "handle" type: DT_VARIANT } attr { name: "N" type: "int" has_minimum: true minimum: 2 } attr { name: "num_experiments" type: "int" } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | experimentalDatasetCardinality :: Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor Build Data.Int.Int64 -- ^ 
__cardinality__ experimentalDatasetCardinality = experimentalDatasetCardinality' id experimentalDatasetCardinality' :: OpParams -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor Build Data.Int.Int64 -- ^ __cardinality__ experimentalDatasetCardinality' op'options input_dataset | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset] return (opDef "ExperimentalDatasetCardinality" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } output_arg { name: "cardinality" type: DT_INT64 } -} -- | experimentalDatasetToTFRecord :: forall v'1 v'2 v'3 m' . (MonadBuild m') => Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __filename__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __compression_type__ -> m' (ControlNode) experimentalDatasetToTFRecord = experimentalDatasetToTFRecord' id experimentalDatasetToTFRecord' :: forall v'1 v'2 v'3 m' . (MonadBuild m') => OpParams -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __filename__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __compression_type__ -> m' (ControlNode) experimentalDatasetToTFRecord' op'options input_dataset filename compression_type | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs filename, buildInputs compression_type] buildOp [] (opDef "ExperimentalDatasetToTFRecord" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "filename" type: DT_STRING } input_arg { name: "compression_type" type: DT_STRING } -} -- | experimentalDenseToSparseBatchDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __batch_size__ -> Tensor v'3 Data.Int.Int64 -- ^ __row_shape__ -> Tensor Build Variant -- ^ __handle__ experimentalDenseToSparseBatchDataset = experimentalDenseToSparseBatchDataset' id experimentalDenseToSparseBatchDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __batch_size__ -> Tensor v'3 Data.Int.Int64 -- ^ __row_shape__ -> Tensor Build Variant -- ^ __handle__ experimentalDenseToSparseBatchDataset' op'options output_types input_dataset batch_size row_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs batch_size, buildInputs row_shape] return (opDef "ExperimentalDenseToSparseBatchDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "batch_size" type: DT_INT64 } input_arg { name: "row_shape" type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | experimentalDirectedInterleaveDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __selector_input_dataset__ -> [Tensor v'2 Variant] -- ^ __data_input_datasets__ -> Tensor Build Variant -- ^ __handle__ experimentalDirectedInterleaveDataset = experimentalDirectedInterleaveDataset' id experimentalDirectedInterleaveDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __selector_input_dataset__ -> [Tensor v'2 Variant] -- ^ 
__data_input_datasets__ -> Tensor Build Variant -- ^ __handle__ experimentalDirectedInterleaveDataset' op'options output_types selector_input_dataset data_input_datasets | eqLengthGuard [("N", [("data_input_datasets", length data_input_datasets)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs selector_input_dataset, buildInputs data_input_datasets] return (opDef "ExperimentalDirectedInterleaveDataset" & opAttr "output_types" .~ output_types & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length data_input_datasets) :: Int64 {- input_arg { name: "selector_input_dataset" type: DT_VARIANT } input_arg { name: "data_input_datasets" type: DT_VARIANT number_attr: "N" } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } attr { name: "N" type: "int" has_minimum: true minimum: 1 } -} -- | experimentalIdentityIndexedDataset :: forall v'1 m' . (MonadBuild m') => Tensor v'1 Data.Word.Word64 -- ^ __size__ -> m' (Tensor Value Variant) -- ^ __handle__ experimentalIdentityIndexedDataset = experimentalIdentityIndexedDataset' id experimentalIdentityIndexedDataset' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 Data.Word.Word64 -- ^ __size__ -> m' (Tensor Value Variant) -- ^ __handle__ experimentalIdentityIndexedDataset' op'options size | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs size] buildOp [] (opDef "ExperimentalIdentityIndexedDataset" & op'options & opInputs .~ op'inputs) {- input_arg { name: "size" type: DT_UINT64 } output_arg { name: "handle" type: DT_VARIANT } -} -- | experimentalIgnoreErrorsDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor Build Variant -- ^ __handle__ experimentalIgnoreErrorsDataset = experimentalIgnoreErrorsDataset' id experimentalIgnoreErrorsDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor Build Variant -- ^ __handle__ experimentalIgnoreErrorsDataset' op'options output_types input_dataset | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset] return (opDef "ExperimentalIgnoreErrorsDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | experimentalIndexedDatasetGet :: forall v'1 v'2 output_types m' . (MonadBuild m', TensorTypes output_types) => Tensor v'1 ResourceHandle -- ^ __materialized__ -> Tensor v'2 Data.Word.Word64 -- ^ __index__ -> m' (TensorList (Value) output_types) -- ^ __components__ experimentalIndexedDatasetGet = experimentalIndexedDatasetGet' id experimentalIndexedDatasetGet' :: forall v'1 v'2 output_types m' . 
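{- Usage sketch (added commentary): `experimentalIgnoreErrorsDataset` wraps
   an existing dataset and drops any element whose computation fails; the
   element types (and, via OpParams, shapes) must be restated as in the
   note above.  `flakyDataset` is hypothetical.

   robust = Ops.experimentalIgnoreErrorsDataset'
       (TF.opAttr "output_shapes" .~ ([TF.Shape []] :: [TF.Shape]))
       [DT_STRING]
       flakyDataset
-}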
(MonadBuild m', TensorTypes output_types) => OpParams -> Tensor v'1 ResourceHandle -- ^ __materialized__ -> Tensor v'2 Data.Word.Word64 -- ^ __index__ -> m' (TensorList (Value) output_types) -- ^ __components__ experimentalIndexedDatasetGet' op'options materialized index | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs materialized, buildInputs index] buildOp [] (opDef "ExperimentalIndexedDatasetGet" & opAttr "output_types" .~ fromTensorTypes (Proxy :: Proxy output_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "materialized" type: DT_RESOURCE } input_arg { name: "index" type: DT_UINT64 } output_arg { name: "components" type_list_attr: "output_types" } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | experimentalIndexedDatasetMaterialize :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 Variant -- ^ __dataset__ -> Tensor v'2 ResourceHandle -- ^ __materialized__ -> m' (ControlNode) experimentalIndexedDatasetMaterialize = experimentalIndexedDatasetMaterialize' id experimentalIndexedDatasetMaterialize' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Tensor v'1 Variant -- ^ __dataset__ -> Tensor v'2 ResourceHandle -- ^ __materialized__ -> m' (ControlNode) experimentalIndexedDatasetMaterialize' op'options dataset materialized | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs dataset, buildInputs materialized] buildOp [] (opDef "ExperimentalIndexedDatasetMaterialize" & op'options & opInputs .~ op'inputs) {- input_arg { name: "dataset" type: DT_VARIANT } input_arg { name: "materialized" type: DT_RESOURCE } -} -- | experimentalIteratorGetDevice :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __resource__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __device__ experimentalIteratorGetDevice = experimentalIteratorGetDevice' id experimentalIteratorGetDevice' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __device__ experimentalIteratorGetDevice' op'options resource | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource] buildOp [] (opDef "ExperimentalIteratorGetDevice" & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } output_arg { name: "device" type: DT_STRING } -} -- | experimentalLMDBDataset :: forall v'1 m' . (MonadBuild m') => [DataType] -- ^ __output_types__ -> Tensor v'1 Data.ByteString.ByteString -- ^ __filenames__ -> m' (Tensor Value Variant) -- ^ __handle__ experimentalLMDBDataset = experimentalLMDBDataset' id experimentalLMDBDataset' :: forall v'1 m' . 
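{- Structural sketch (added commentary): the indexed-dataset ops combine as
   materialize-then-get.  All lower-case names are hypothetical, the result
   element types must be pinned by a type annotation (output_types is a
   type-level TensorTypes list for the get op), the handle op's "container",
   "shared_name" and "output_shapes" attributes are omitted for brevity,
   and fetching the returned components is elided:

   do mat  <- Ops.experimentalMaterializedIndexDatasetHandle [DT_INT64]
              -- handle op defined a little further below
      step <- Ops.experimentalIndexedDatasetMaterialize dataset mat
      TF.run_ step   -- actually execute the materialization
      Ops.experimentalIndexedDatasetGet mat (TF.scalar (5 :: Word64))
-}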
(MonadBuild m') => OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Data.ByteString.ByteString -- ^ __filenames__ -> m' (Tensor Value Variant) -- ^ __handle__ experimentalLMDBDataset' op'options output_types filenames | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs filenames] buildOp [] (opDef "ExperimentalLMDBDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "filenames" type: DT_STRING } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | experimentalLatencyStatsDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __tag__ -> Tensor Build Variant -- ^ __handle__ experimentalLatencyStatsDataset = experimentalLatencyStatsDataset' id experimentalLatencyStatsDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __tag__ -> Tensor Build Variant -- ^ __handle__ experimentalLatencyStatsDataset' op'options output_types input_dataset tag | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs tag] return (opDef "ExperimentalLatencyStatsDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "tag" type: DT_STRING } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | experimentalMatchingFilesDataset :: forall v'1 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __patterns__ -> m' (Tensor Value Variant) -- ^ __handle__ experimentalMatchingFilesDataset = experimentalMatchingFilesDataset' id experimentalMatchingFilesDataset' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __patterns__ -> m' (Tensor Value Variant) -- ^ __handle__ experimentalMatchingFilesDataset' op'options patterns | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs patterns] buildOp [] (opDef "ExperimentalMatchingFilesDataset" & op'options & opInputs .~ op'inputs) {- input_arg { name: "patterns" type: DT_STRING } output_arg { name: "handle" type: DT_VARIANT } -} -- | experimentalMaterializedIndexDatasetHandle :: forall m' . (MonadBuild m') => [DataType] -- ^ __output_types__ -> m' (Tensor Value ResourceHandle) -- ^ __handle__ experimentalMaterializedIndexDatasetHandle = experimentalMaterializedIndexDatasetHandle' id experimentalMaterializedIndexDatasetHandle' :: forall m' . 
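{- Usage sketch (added commentary): `experimentalMatchingFilesDataset`
   turns a tensor of glob patterns into a dataset of matching file names
   (OverloadedStrings assumed for the ByteString literal; the pattern is
   illustrative):

   fileDataset = Ops.experimentalMatchingFilesDataset
       (TF.vector ["/data/*.tfrecord" :: ByteString])
-}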
(MonadBuild m') => OpParams -> [DataType] -- ^ __output_types__ -> m' (Tensor Value ResourceHandle) -- ^ __handle__ experimentalMaterializedIndexDatasetHandle' op'options output_types | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "ExperimentalMaterializedIndexDatasetHandle" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" type: DT_RESOURCE } attr { name: "container" type: "string" } attr { name: "shared_name" type: "string" } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | experimentalMaxIntraOpParallelismDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __max_intra_op_parallelism__ -> Tensor Build Variant -- ^ __handle__ experimentalMaxIntraOpParallelismDataset = experimentalMaxIntraOpParallelismDataset' id experimentalMaxIntraOpParallelismDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __max_intra_op_parallelism__ -> Tensor Build Variant -- ^ __handle__ experimentalMaxIntraOpParallelismDataset' op'options output_types input_dataset max_intra_op_parallelism | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs max_intra_op_parallelism] return (opDef "ExperimentalMaxIntraOpParallelismDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "max_intra_op_parallelism" type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | experimentalNonSerializableDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor Build Variant -- ^ __handle__ experimentalNonSerializableDataset = experimentalNonSerializableDataset' id experimentalNonSerializableDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor Build Variant -- ^ __handle__ experimentalNonSerializableDataset' op'options output_types input_dataset | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset] return (opDef "ExperimentalNonSerializableDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | experimentalParseExampleDataset :: forall v'1 v'2 v'3 tdense . (OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] tdense) => [DataType] -- ^ __output_types__ -> [DataType] -- ^ __sparse_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __num_parallel_calls__ -> TensorList (v'3) tdense -- ^ __dense_defaults__ -> Tensor Build Variant -- ^ __handle__ experimentalParseExampleDataset = experimentalParseExampleDataset' id experimentalParseExampleDataset' :: forall v'1 v'2 v'3 tdense . 
(OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] tdense) => OpParams -> [DataType] -- ^ __output_types__ -> [DataType] -- ^ __sparse_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __num_parallel_calls__ -> TensorList (v'3) tdense -- ^ __dense_defaults__ -> Tensor Build Variant -- ^ __handle__ experimentalParseExampleDataset' op'options output_types sparse_types input_dataset num_parallel_calls dense_defaults | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs num_parallel_calls, buildInputs dense_defaults] return (opDef "ExperimentalParseExampleDataset" & opAttr "Tdense" .~ fromTensorTypes (Proxy :: Proxy tdense) & opAttr "output_types" .~ output_types & opAttr "sparse_types" .~ sparse_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "num_parallel_calls" type: DT_INT64 } input_arg { name: "dense_defaults" type_list_attr: "Tdense" } output_arg { name: "handle" type: DT_VARIANT } attr { name: "sparse_keys" type: "list(string)" has_minimum: true } attr { name: "dense_keys" type: "list(string)" has_minimum: true } attr { name: "sparse_types" type: "list(type)" has_minimum: true allowed_values { list { type: DT_FLOAT type: DT_INT64 type: DT_STRING } } } attr { name: "Tdense" type: "list(type)" has_minimum: true allowed_values { list { type: DT_FLOAT type: DT_INT64 type: DT_STRING } } } attr { name: "dense_shapes" type: "list(shape)" has_minimum: true } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } attr { name: "sloppy" type: "bool" default_value { b: false } } -} -- | experimentalPrivateThreadPoolDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __num_threads__ -> Tensor Build Variant -- ^ __handle__ experimentalPrivateThreadPoolDataset = experimentalPrivateThreadPoolDataset' id experimentalPrivateThreadPoolDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __num_threads__ -> Tensor Build Variant -- ^ __handle__ experimentalPrivateThreadPoolDataset' op'options output_types input_dataset num_threads | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs num_threads] return (opDef "ExperimentalPrivateThreadPoolDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "num_threads" type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | experimentalRandomDataset :: forall v'1 v'2 m' . (MonadBuild m') => [DataType] -- ^ __output_types__ -> Tensor v'1 Data.Int.Int64 -- ^ __seed__ -> Tensor v'2 Data.Int.Int64 -- ^ __seed2__ -> m' (Tensor Value Variant) -- ^ __handle__ experimentalRandomDataset = experimentalRandomDataset' id experimentalRandomDataset' :: forall v'1 v'2 m' . 
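{- Note (added commentary): `experimentalParseExampleDataset` keeps most of
   its configuration in attributes rather than arguments: "sparse_keys",
   "dense_keys" and "dense_shapes" (see the op definition above) all travel
   through OpParams and must stay consistent in length with sparse_types
   and Tdense.  A sketch, assuming Attribute instances exist for the list
   types involved; `inputDataset`, `numParallelCalls` and `denseDefaults`
   are hypothetical:

   parsed = Ops.experimentalParseExampleDataset'
       ( (TF.opAttr "dense_keys"    .~ (["label"] :: [ByteString]))
       . (TF.opAttr "dense_shapes"  .~ ([TF.Shape []] :: [TF.Shape]))
       . (TF.opAttr "output_shapes" .~ ([TF.Shape []] :: [TF.Shape])) )
       [DT_INT64]         -- output_types
       []                 -- sparse_types (no sparse features here)
       inputDataset
       numParallelCalls   -- e.g. TF.scalar 4
       denseDefaults      -- TensorList matching Tdense
-}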
(MonadBuild m') => OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Data.Int.Int64 -- ^ __seed__ -> Tensor v'2 Data.Int.Int64 -- ^ __seed2__ -> m' (Tensor Value Variant) -- ^ __handle__ experimentalRandomDataset' op'options output_types seed seed2 | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs seed, buildInputs seed2] buildOp [] (opDef "ExperimentalRandomDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "seed" type: DT_INT64 } input_arg { name: "seed2" type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | experimentalRebatchDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __num_workers__ -> Tensor Build Variant -- ^ __handle__ experimentalRebatchDataset = experimentalRebatchDataset' id experimentalRebatchDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __num_workers__ -> Tensor Build Variant -- ^ __handle__ experimentalRebatchDataset' op'options output_types input_dataset num_workers | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs num_workers] return (opDef "ExperimentalRebatchDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "num_workers" type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | experimentalSetStatsAggregatorDataset :: forall v'1 v'2 v'3 v'4 m' . (MonadBuild m') => [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 ResourceHandle -- ^ __stats_aggregator__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __tag__ -> Tensor v'4 Data.ByteString.ByteString -- ^ __counter_prefix__ -> m' (Tensor Value Variant) -- ^ __handle__ experimentalSetStatsAggregatorDataset = experimentalSetStatsAggregatorDataset' id experimentalSetStatsAggregatorDataset' :: forall v'1 v'2 v'3 v'4 m' . 
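{- Usage sketch (added commentary): `experimentalRandomDataset` (just
   above) produces an endless stream of pseudo-random Int64 scalars; fixing
   both seeds makes the stream reproducible.

   rnd = Ops.experimentalRandomDataset'
       (TF.opAttr "output_shapes" .~ ([TF.Shape []] :: [TF.Shape]))
       [DT_INT64]
       (TF.scalar (1 :: Int64))   -- seed
       (TF.scalar (2 :: Int64))   -- seed2
-}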
(MonadBuild m') => OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 ResourceHandle -- ^ __stats_aggregator__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __tag__ -> Tensor v'4 Data.ByteString.ByteString -- ^ __counter_prefix__ -> m' (Tensor Value Variant) -- ^ __handle__ experimentalSetStatsAggregatorDataset' op'options output_types input_dataset stats_aggregator tag counter_prefix | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs stats_aggregator, buildInputs tag, buildInputs counter_prefix] buildOp [] (opDef "ExperimentalSetStatsAggregatorDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "stats_aggregator" type: DT_RESOURCE } input_arg { name: "tag" type: DT_STRING } input_arg { name: "counter_prefix" type: DT_STRING } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | experimentalSleepDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __sleep_microseconds__ -> Tensor Build Variant -- ^ __handle__ experimentalSleepDataset = experimentalSleepDataset' id experimentalSleepDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __sleep_microseconds__ -> Tensor Build Variant -- ^ __handle__ experimentalSleepDataset' op'options output_types input_dataset sleep_microseconds | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs sleep_microseconds] return (opDef "ExperimentalSleepDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "sleep_microseconds" type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | experimentalSlidingWindowDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __window_size__ -> Tensor v'3 Data.Int.Int64 -- ^ __window_shift__ -> Tensor v'4 Data.Int.Int64 -- ^ __window_stride__ -> Tensor Build Variant -- ^ __handle__ experimentalSlidingWindowDataset = experimentalSlidingWindowDataset' id experimentalSlidingWindowDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __window_size__ -> Tensor v'3 Data.Int.Int64 -- ^ __window_shift__ -> Tensor v'4 Data.Int.Int64 -- ^ __window_stride__ -> Tensor Build Variant -- ^ __handle__ experimentalSlidingWindowDataset' op'options output_types input_dataset window_size window_shift window_stride | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs window_size, buildInputs window_shift, buildInputs window_stride] return (opDef "ExperimentalSlidingWindowDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "window_size" 
type: DT_INT64 } input_arg { name: "window_shift" type: DT_INT64 } input_arg { name: "window_stride" type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | experimentalSqlDataset :: forall v'1 v'2 v'3 m' . (MonadBuild m') => [DataType] -- ^ __output_types__ -> Tensor v'1 Data.ByteString.ByteString -- ^ __driver_name__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __data_source_name__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __query__ -> m' (Tensor Value Variant) -- ^ __handle__ experimentalSqlDataset = experimentalSqlDataset' id experimentalSqlDataset' :: forall v'1 v'2 v'3 m' . (MonadBuild m') => OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Data.ByteString.ByteString -- ^ __driver_name__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __data_source_name__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __query__ -> m' (Tensor Value Variant) -- ^ __handle__ experimentalSqlDataset' op'options output_types driver_name data_source_name query | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs driver_name, buildInputs data_source_name, buildInputs query] buildOp [] (opDef "ExperimentalSqlDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "driver_name" type: DT_STRING } input_arg { name: "data_source_name" type: DT_STRING } input_arg { name: "query" type: DT_STRING } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | experimentalStatsAggregatorHandle :: forall m' . (MonadBuild m') => m' (Tensor Value ResourceHandle) -- ^ __handle__ experimentalStatsAggregatorHandle = experimentalStatsAggregatorHandle' id experimentalStatsAggregatorHandle' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Value ResourceHandle) -- ^ __handle__ experimentalStatsAggregatorHandle' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "ExperimentalStatsAggregatorHandle" & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" type: DT_RESOURCE } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | experimentalStatsAggregatorSummary :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __iterator__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __summary__ experimentalStatsAggregatorSummary = experimentalStatsAggregatorSummary' id experimentalStatsAggregatorSummary' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __iterator__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __summary__ experimentalStatsAggregatorSummary' op'options iterator | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs iterator] buildOp [] (opDef "ExperimentalStatsAggregatorSummary" & op'options & opInputs .~ op'inputs) {- input_arg { name: "iterator" type: DT_RESOURCE } output_arg { name: "summary" type: DT_STRING } -} -- | experimentalThreadPoolDataset :: forall v'1 v'2 m' . 
(MonadBuild m') => [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 ResourceHandle -- ^ __thread_pool__ -> m' (Tensor Value Variant) -- ^ __handle__ experimentalThreadPoolDataset = experimentalThreadPoolDataset' id experimentalThreadPoolDataset' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 ResourceHandle -- ^ __thread_pool__ -> m' (Tensor Value Variant) -- ^ __handle__ experimentalThreadPoolDataset' op'options output_types input_dataset thread_pool | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs thread_pool] buildOp [] (opDef "ExperimentalThreadPoolDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "thread_pool" type: DT_RESOURCE } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | experimentalThreadPoolHandle :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_threads__ -> m' (Tensor Value ResourceHandle) -- ^ __handle__ experimentalThreadPoolHandle = experimentalThreadPoolHandle' id experimentalThreadPoolHandle' :: forall m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_threads__ -> m' (Tensor Value ResourceHandle) -- ^ __handle__ experimentalThreadPoolHandle' op'options num_threads | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "ExperimentalThreadPoolHandle" & opAttr "num_threads" .~ num_threads & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" type: DT_RESOURCE } attr { name: "num_threads" type: "int" } attr { name: "max_intra_op_parallelism" type: "int" default_value { i: 1 } } attr { name: "display_name" type: "string" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | experimentalUnbatchDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor Build Variant -- ^ __handle__ experimentalUnbatchDataset = experimentalUnbatchDataset' id experimentalUnbatchDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor Build Variant -- ^ __handle__ experimentalUnbatchDataset' op'options output_types input_dataset | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset] return (opDef "ExperimentalUnbatchDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | experimentalUniqueDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor Build Variant -- ^ __handle__ experimentalUniqueDataset = experimentalUniqueDataset' id experimentalUniqueDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor Build Variant -- ^ __handle__ experimentalUniqueDataset' op'options output_types input_dataset 
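{- Usage sketch (illustrative; not part of the generated bindings). Dataset
   ops thread a DT_VARIANT handle from stage to stage, so the pure ones
   compose directly. Assuming an upstream dataset handle and the proto-lens
   DataType enum (e.g. DT_FLOAT) in scope:

   > dedup :: [DataType] -> Tensor v Variant -> Tensor Build Variant
   > dedup outTypes ds =
   >   experimentalUniqueDataset outTypes
   >     (experimentalUnbatchDataset outTypes ds)
-}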
| eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset] return (opDef "ExperimentalUniqueDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | expm1 :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ expm1 = expm1' id expm1' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ expm1' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Expm1" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | extractGlimpse :: Tensor v'1 Float -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __size__ -> Tensor v'3 Float -- ^ __offsets__ -> Tensor Build Float -- ^ __glimpse__ extractGlimpse = extractGlimpse' id extractGlimpse' :: OpParams -> Tensor v'1 Float -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __size__ -> Tensor v'3 Float -- ^ __offsets__ -> Tensor Build Float -- ^ __glimpse__ extractGlimpse' op'options input size offsets | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs size, buildInputs offsets] return (opDef "ExtractGlimpse" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_FLOAT } input_arg { name: "size" type: DT_INT32 } input_arg { name: "offsets" type: DT_FLOAT } output_arg { name: "glimpse" type: DT_FLOAT } attr { name: "centered" type: "bool" default_value { b: true } } attr { name: "normalized" type: "bool" default_value { b: true } } attr { name: "uniform_noise" type: "bool" default_value { b: true } } attr { name: "noise" type: "string" default_value { s: "uniform" } } -} -- | extractImagePatches :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __images__ -> Tensor Build t -- ^ __patches__ extractImagePatches = extractImagePatches' id extractImagePatches' :: forall v'1 t . 
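{- Note (illustrative; not part of the generated bindings). The
   extractImagePatches attributes "ksizes", "strides", "rates" (list(int),
   length >= 4) and "padding" have no default values yet are not lifted into
   explicit arguments, so they must be supplied through the primed variant's
   OpParams, assuming the usual Attribute instances for [Int64] and
   ByteString:

   > import Data.Int (Int64)
   >
   > patches3x3 :: Tensor v Float -> Tensor Build Float
   > patches3x3 images = extractImagePatches'
   >     ( (opAttr "ksizes"  .~ ([1, 3, 3, 1] :: [Int64]))
   >     . (opAttr "strides" .~ ([1, 1, 1, 1] :: [Int64]))
   >     . (opAttr "rates"   .~ ([1, 1, 1, 1] :: [Int64]))
   >     . (opAttr "padding" .~ ("VALID" :: ByteString)) )
   >     images
-}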
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __images__ -> Tensor Build t -- ^ __patches__ extractImagePatches' op'options images | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images] return (opDef "ExtractImagePatches" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "images" type_attr: "T" } output_arg { name: "patches" type_attr: "T" } attr { name: "ksizes" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "strides" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "rates" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } -} -- | extractJpegShape :: forall v'1 output_type . (OneOf '[Data.Int.Int32, Data.Int.Int64] output_type) => Tensor v'1 Data.ByteString.ByteString -- ^ __contents__ -> Tensor Build output_type -- ^ __image_shape__ extractJpegShape = extractJpegShape' id extractJpegShape' :: forall v'1 output_type . (OneOf '[Data.Int.Int32, Data.Int.Int64] output_type) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __contents__ -> Tensor Build output_type -- ^ __image_shape__ extractJpegShape' op'options contents | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs contents] return (opDef "ExtractJpegShape" & opAttr "output_type" .~ tensorType (undefined :: output_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "contents" type: DT_STRING } output_arg { name: "image_shape" type_attr: "output_type" } attr { name: "output_type" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | extractVolumePatches :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __patches__ extractVolumePatches = extractVolumePatches' id extractVolumePatches' :: forall v'1 t . 
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __patches__ extractVolumePatches' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "ExtractVolumePatches" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "patches" type_attr: "T" } attr { name: "ksizes" type: "list(int)" has_minimum: true minimum: 5 } attr { name: "strides" type: "list(int)" has_minimum: true minimum: 5 } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } -} -- | fFT :: forall v'1 tcomplex . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] tcomplex) => Tensor v'1 tcomplex -- ^ __input__ -> Tensor Build tcomplex -- ^ __output__ fFT = fFT' id fFT' :: forall v'1 tcomplex . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] tcomplex) => OpParams -> Tensor v'1 tcomplex -- ^ __input__ -> Tensor Build tcomplex -- ^ __output__ fFT' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "FFT" & opAttr "Tcomplex" .~ tensorType (undefined :: tcomplex) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tcomplex" } output_arg { name: "output" type_attr: "Tcomplex" } attr { name: "Tcomplex" type: "type" default_value { type: DT_COMPLEX64 } allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | fFT2D :: forall v'1 tcomplex . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] tcomplex) => Tensor v'1 tcomplex -- ^ __input__ -> Tensor Build tcomplex -- ^ __output__ fFT2D = fFT2D' id fFT2D' :: forall v'1 tcomplex . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] tcomplex) => OpParams -> Tensor v'1 tcomplex -- ^ __input__ -> Tensor Build tcomplex -- ^ __output__ fFT2D' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "FFT2D" & opAttr "Tcomplex" .~ tensorType (undefined :: tcomplex) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tcomplex" } output_arg { name: "output" type_attr: "Tcomplex" } attr { name: "Tcomplex" type: "type" default_value { type: DT_COMPLEX64 } allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | fFT3D :: forall v'1 tcomplex . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] tcomplex) => Tensor v'1 tcomplex -- ^ __input__ -> Tensor Build tcomplex -- ^ __output__ fFT3D = fFT3D' id fFT3D' :: forall v'1 tcomplex . 
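{- Usage sketch (illustrative; not part of the generated bindings). fFT,
   fFT2D and fFT3D transform over the innermost 1, 2 or 3 dimensions and are
   polymorphic in the complex element type via the "Tcomplex" attribute.
   Assuming tensorflow-ops' TF.vector: the FFT of a unit impulse is a
   constant-one spectrum.

   > import Data.Complex (Complex ((:+)))
   > import qualified TensorFlow.Ops as TF
   >
   > spectrum :: Tensor Build (Complex Float)
   > spectrum = fFT (TF.vector [1 :+ 0, 0 :+ 0, 0 :+ 0, 0 :+ 0])
-}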
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] tcomplex) => OpParams -> Tensor v'1 tcomplex -- ^ __input__ -> Tensor Build tcomplex -- ^ __output__ fFT3D' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "FFT3D" & opAttr "Tcomplex" .~ tensorType (undefined :: tcomplex) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tcomplex" } output_arg { name: "output" type_attr: "Tcomplex" } attr { name: "Tcomplex" type: "type" default_value { type: DT_COMPLEX64 } allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | fIFOQueue :: forall m' . (MonadBuild m') => [DataType] -- ^ __component_types__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__ fIFOQueue = fIFOQueue' id fIFOQueue' :: forall m' . (MonadBuild m') => OpParams -> [DataType] -- ^ __component_types__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__ fIFOQueue' op'options component_types | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "FIFOQueue" & opAttr "component_types" .~ component_types & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" type: DT_STRING is_ref: true } attr { name: "component_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "shapes" type: "list(shape)" default_value { list { } } has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | fIFOQueueV2 :: forall m' . (MonadBuild m') => [DataType] -- ^ __component_types__ -> m' (Tensor Value ResourceHandle) -- ^ __handle__ fIFOQueueV2 = fIFOQueueV2' id fIFOQueueV2' :: forall m' . (MonadBuild m') => OpParams -> [DataType] -- ^ __component_types__ -> m' (Tensor Value ResourceHandle) -- ^ __handle__ fIFOQueueV2' op'options component_types | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "FIFOQueueV2" & opAttr "component_types" .~ component_types & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" type: DT_RESOURCE } attr { name: "component_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "shapes" type: "list(shape)" default_value { list { } } has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | fact :: Tensor Build Data.ByteString.ByteString -- ^ __fact__ fact = fact' id fact' :: OpParams -> Tensor Build Data.ByteString.ByteString -- ^ __fact__ fact' op'options | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] return (opDef "Fact" & op'options & opInputs .~ op'inputs) {- output_arg { name: "fact" type: DT_STRING } -} -- | fakeParam :: forall dtype . (TensorType dtype) => Shape -- ^ __shape__ -> Tensor Build dtype -- ^ __output__ fakeParam = fakeParam' id fakeParam' :: forall dtype . 
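{- Usage sketch (illustrative; not part of the generated bindings). fact is a
   zero-input op producing a scalar DT_STRING tensor, which makes it a
   minimal end-to-end check of the pure-op plumbing; this assumes string
   scalars are Fetchable as ByteString via the session API in
   TensorFlow.Core:

   > import qualified Data.ByteString.Char8 as B
   > import qualified TensorFlow.Core as TF
   >
   > main :: IO ()
   > main = do
   >   f <- TF.runSession (TF.run fact) :: IO (TF.Scalar B.ByteString)
   >   B.putStrLn (TF.unScalar f)
-}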
(TensorType dtype) => OpParams -> Shape -- ^ __shape__ -> Tensor Build dtype -- ^ __output__ fakeParam' op'options shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] return (opDef "FakeParam" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- output_arg { name: "output" type_attr: "dtype" } attr { name: "dtype" type: "type" } attr { name: "shape" type: "shape" } -} -- | fakeQuantWithMinMaxArgs :: Tensor v'1 Float -- ^ __inputs__ -> Tensor Build Float -- ^ __outputs__ fakeQuantWithMinMaxArgs = fakeQuantWithMinMaxArgs' id fakeQuantWithMinMaxArgs' :: OpParams -> Tensor v'1 Float -- ^ __inputs__ -> Tensor Build Float -- ^ __outputs__ fakeQuantWithMinMaxArgs' op'options inputs | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs] return (opDef "FakeQuantWithMinMaxArgs" & op'options & opInputs .~ op'inputs) {- input_arg { name: "inputs" type: DT_FLOAT } output_arg { name: "outputs" type: DT_FLOAT } attr { name: "min" type: "float" default_value { f: -6.0 } } attr { name: "max" type: "float" default_value { f: 6.0 } } attr { name: "num_bits" type: "int" default_value { i: 8 } } attr { name: "narrow_range" type: "bool" default_value { b: false } } -} -- | fakeQuantWithMinMaxArgsGradient :: Tensor v'1 Float -- ^ __gradients__ -> Tensor v'2 Float -- ^ __inputs__ -> Tensor Build Float -- ^ __backprops__ fakeQuantWithMinMaxArgsGradient = fakeQuantWithMinMaxArgsGradient' id fakeQuantWithMinMaxArgsGradient' :: OpParams -> Tensor v'1 Float -- ^ __gradients__ -> Tensor v'2 Float -- ^ __inputs__ -> Tensor Build Float -- ^ __backprops__ fakeQuantWithMinMaxArgsGradient' op'options gradients inputs | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients, buildInputs inputs] return (opDef "FakeQuantWithMinMaxArgsGradient" & op'options & opInputs .~ op'inputs) {- input_arg { name: "gradients" type: DT_FLOAT } input_arg { name: "inputs" type: DT_FLOAT } output_arg { name: "backprops" type: DT_FLOAT } attr { name: "min" type: "float" default_value { f: -6.0 } } attr { name: "max" type: "float" default_value { f: 6.0 } } attr { name: "num_bits" type: "int" default_value { i: 8 } } attr { name: "narrow_range" type: "bool" default_value { b: false } } -} -- | fakeQuantWithMinMaxVars :: Tensor v'1 Float -- ^ __inputs__ -> Tensor v'2 Float -- ^ __min__ -> Tensor v'3 Float -- ^ __max__ -> Tensor Build Float -- ^ __outputs__ fakeQuantWithMinMaxVars = fakeQuantWithMinMaxVars' id fakeQuantWithMinMaxVars' :: OpParams -> Tensor v'1 Float -- ^ __inputs__ -> Tensor v'2 Float -- ^ __min__ -> Tensor v'3 Float -- ^ __max__ -> Tensor Build Float -- ^ __outputs__ fakeQuantWithMinMaxVars' op'options inputs min max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs, buildInputs min, buildInputs max] return (opDef "FakeQuantWithMinMaxVars" & op'options & opInputs .~ op'inputs) {- input_arg { name: "inputs" type: DT_FLOAT } input_arg { name: "min" type: DT_FLOAT } input_arg { name: "max" type: DT_FLOAT } output_arg { name: "outputs" type: DT_FLOAT } attr { name: "num_bits" type: "int" default_value { i: 8 } } attr { name: "narrow_range" type: "bool" default_value { b: false } } -} -- | fakeQuantWithMinMaxVarsGradient :: Tensor v'1 Float -- ^ __gradients__ -> Tensor v'2 Float -- ^ __inputs__ -> Tensor v'3 Float -- ^ __min__ -> Tensor v'4 Float -- ^ __max__ -> 
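{- Note (illustrative; not part of the generated bindings). The fakeQuant*
   family defaults to min = -6.0, max = 6.0, num_bits = 8 and
   narrow_range = false; the primed variants override these through OpParams:

   > import Data.Int (Int64)
   >
   > quantize :: Tensor v Float -> Tensor Build Float
   > quantize inputs = fakeQuantWithMinMaxArgs'
   >     ( (opAttr "min" .~ (-1.0 :: Float))
   >     . (opAttr "max" .~ (1.0 :: Float))
   >     . (opAttr "num_bits" .~ (4 :: Int64)) )
   >     inputs
-}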
(Tensor Build Float, Tensor Build Float, Tensor Build Float) -- ^ (__backprops_wrt_input__, __backprop_wrt_min__, __backprop_wrt_max__) -- -- * __backprops_wrt_input__ -- -- * __backprop_wrt_min__ -- -- * __backprop_wrt_max__ fakeQuantWithMinMaxVarsGradient = fakeQuantWithMinMaxVarsGradient' id fakeQuantWithMinMaxVarsGradient' :: OpParams -> Tensor v'1 Float -- ^ __gradients__ -> Tensor v'2 Float -- ^ __inputs__ -> Tensor v'3 Float -- ^ __min__ -> Tensor v'4 Float -- ^ __max__ -> (Tensor Build Float, Tensor Build Float, Tensor Build Float) -- ^ (__backprops_wrt_input__, __backprop_wrt_min__, __backprop_wrt_max__) -- -- * __backprops_wrt_input__ -- -- * __backprop_wrt_min__ -- -- * __backprop_wrt_max__ fakeQuantWithMinMaxVarsGradient' op'options gradients inputs min max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients, buildInputs inputs, buildInputs min, buildInputs max] return (opDef "FakeQuantWithMinMaxVarsGradient" & op'options & opInputs .~ op'inputs) {- input_arg { name: "gradients" type: DT_FLOAT } input_arg { name: "inputs" type: DT_FLOAT } input_arg { name: "min" type: DT_FLOAT } input_arg { name: "max" type: DT_FLOAT } output_arg { name: "backprops_wrt_input" type: DT_FLOAT } output_arg { name: "backprop_wrt_min" type: DT_FLOAT } output_arg { name: "backprop_wrt_max" type: DT_FLOAT } attr { name: "num_bits" type: "int" default_value { i: 8 } } attr { name: "narrow_range" type: "bool" default_value { b: false } } -} -- | fakeQuantWithMinMaxVarsPerChannel :: Tensor v'1 Float -- ^ __inputs__ -> Tensor v'2 Float -- ^ __min__ -> Tensor v'3 Float -- ^ __max__ -> Tensor Build Float -- ^ __outputs__ fakeQuantWithMinMaxVarsPerChannel = fakeQuantWithMinMaxVarsPerChannel' id fakeQuantWithMinMaxVarsPerChannel' :: OpParams -> Tensor v'1 Float -- ^ __inputs__ -> Tensor v'2 Float -- ^ __min__ -> Tensor v'3 Float -- ^ __max__ -> Tensor Build Float -- ^ __outputs__ fakeQuantWithMinMaxVarsPerChannel' op'options inputs min max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs, buildInputs min, buildInputs max] return (opDef "FakeQuantWithMinMaxVarsPerChannel" & op'options & opInputs .~ op'inputs) {- input_arg { name: "inputs" type: DT_FLOAT } input_arg { name: "min" type: DT_FLOAT } input_arg { name: "max" type: DT_FLOAT } output_arg { name: "outputs" type: DT_FLOAT } attr { name: "num_bits" type: "int" default_value { i: 8 } } attr { name: "narrow_range" type: "bool" default_value { b: false } } -} -- | fakeQuantWithMinMaxVarsPerChannelGradient :: Tensor v'1 Float -- ^ __gradients__ -> Tensor v'2 Float -- ^ __inputs__ -> Tensor v'3 Float -- ^ __min__ -> Tensor v'4 Float -- ^ __max__ -> (Tensor Build Float, Tensor Build Float, Tensor Build Float) -- ^ (__backprops_wrt_input__, __backprop_wrt_min__, __backprop_wrt_max__) -- -- * __backprops_wrt_input__ -- -- * __backprop_wrt_min__ -- -- * __backprop_wrt_max__ fakeQuantWithMinMaxVarsPerChannelGradient = fakeQuantWithMinMaxVarsPerChannelGradient' id fakeQuantWithMinMaxVarsPerChannelGradient' :: OpParams -> Tensor v'1 Float -- ^ __gradients__ -> Tensor v'2 Float -- ^ __inputs__ -> Tensor v'3 Float -- ^ __min__ -> Tensor v'4 Float -- ^ __max__ -> (Tensor Build Float, Tensor Build Float, Tensor Build Float) -- ^ (__backprops_wrt_input__, __backprop_wrt_min__, __backprop_wrt_max__) -- -- * __backprops_wrt_input__ -- -- * __backprop_wrt_min__ -- -- * __backprop_wrt_max__ fakeQuantWithMinMaxVarsPerChannelGradient' op'options gradients 
inputs min max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients, buildInputs inputs, buildInputs min, buildInputs max] return (opDef "FakeQuantWithMinMaxVarsPerChannelGradient" & op'options & opInputs .~ op'inputs) {- input_arg { name: "gradients" type: DT_FLOAT } input_arg { name: "inputs" type: DT_FLOAT } input_arg { name: "min" type: DT_FLOAT } input_arg { name: "max" type: DT_FLOAT } output_arg { name: "backprops_wrt_input" type: DT_FLOAT } output_arg { name: "backprop_wrt_min" type: DT_FLOAT } output_arg { name: "backprop_wrt_max" type: DT_FLOAT } attr { name: "num_bits" type: "int" default_value { i: 8 } } attr { name: "narrow_range" type: "bool" default_value { b: false } } -} -- | fakeQueue :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __resource__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__ fakeQueue = fakeQueue' id fakeQueue' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__ fakeQueue' op'options resource | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource] buildOp [] (opDef "FakeQueue" & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } output_arg { name: "handle" type: DT_STRING is_ref: true } -} -- | fill :: forall v'1 v'2 t index_type . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] index_type) => Tensor v'1 index_type -- ^ __dims__ -> Tensor v'2 t -- ^ __value__ -> Tensor Build t -- ^ __output__ fill = fill' id fill' :: forall v'1 v'2 t index_type . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] index_type) => OpParams -> Tensor v'1 index_type -- ^ __dims__ -> Tensor v'2 t -- ^ __value__ -> Tensor Build t -- ^ __output__ fill' op'options dims value | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs dims, buildInputs value] return (opDef "Fill" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "index_type" .~ tensorType (undefined :: index_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "dims" type_attr: "index_type" } input_arg { name: "value" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "index_type" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | filterByLastComponentDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor Build Variant -- ^ __output__ filterByLastComponentDataset = filterByLastComponentDataset' id filterByLastComponentDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor Build Variant -- ^ __output__ filterByLastComponentDataset' op'options output_types input_dataset | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset] return (opDef "FilterByLastComponentDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } output_arg { name: "output" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | fingerprint :: forall v'1 v'2 t . 
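{- Usage sketch (illustrative; not part of the generated bindings). fill
   broadcasts a scalar value into a tensor of the given dims; the
   "index_type" attribute is inferred from the dims tensor (Int32 or Int64).
   Assuming tensorflow-ops' helpers:

   > import qualified TensorFlow.Ops as TF
   >
   > ones2x3 :: Tensor Build Float
   > ones2x3 = fill (TF.vector [2, 3 :: Data.Int.Int32]) (TF.scalar 1)
-}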
(TensorType t) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __method__ -> Tensor Build Data.Word.Word8 -- ^ __fingerprint__ fingerprint = fingerprint' id fingerprint' :: forall v'1 v'2 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __method__ -> Tensor Build Data.Word.Word8 -- ^ __fingerprint__ fingerprint' op'options data' method | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs method] return (opDef "Fingerprint" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "method" type: DT_STRING } output_arg { name: "fingerprint" type: DT_UINT8 } attr { name: "T" type: "type" } -} -- | fixedLengthRecordDataset :: forall v'1 v'2 v'3 v'4 v'5 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __filenames__ -> Tensor v'2 Data.Int.Int64 -- ^ __header_bytes__ -> Tensor v'3 Data.Int.Int64 -- ^ __record_bytes__ -> Tensor v'4 Data.Int.Int64 -- ^ __footer_bytes__ -> Tensor v'5 Data.Int.Int64 -- ^ __buffer_size__ -> m' (Tensor Value Variant) -- ^ __handle__ fixedLengthRecordDataset = fixedLengthRecordDataset' id fixedLengthRecordDataset' :: forall v'1 v'2 v'3 v'4 v'5 m' . (MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __filenames__ -> Tensor v'2 Data.Int.Int64 -- ^ __header_bytes__ -> Tensor v'3 Data.Int.Int64 -- ^ __record_bytes__ -> Tensor v'4 Data.Int.Int64 -- ^ __footer_bytes__ -> Tensor v'5 Data.Int.Int64 -- ^ __buffer_size__ -> m' (Tensor Value Variant) -- ^ __handle__ fixedLengthRecordDataset' op'options filenames header_bytes record_bytes footer_bytes buffer_size | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs filenames, buildInputs header_bytes, buildInputs record_bytes, buildInputs footer_bytes, buildInputs buffer_size] buildOp [] (opDef "FixedLengthRecordDataset" & op'options & opInputs .~ op'inputs) {- input_arg { name: "filenames" type: DT_STRING } input_arg { name: "header_bytes" type: DT_INT64 } input_arg { name: "record_bytes" type: DT_INT64 } input_arg { name: "footer_bytes" type: DT_INT64 } input_arg { name: "buffer_size" type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } -} -- | fixedLengthRecordDatasetV2 :: forall v'1 v'2 v'3 v'4 v'5 v'6 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __filenames__ -> Tensor v'2 Data.Int.Int64 -- ^ __header_bytes__ -> Tensor v'3 Data.Int.Int64 -- ^ __record_bytes__ -> Tensor v'4 Data.Int.Int64 -- ^ __footer_bytes__ -> Tensor v'5 Data.Int.Int64 -- ^ __buffer_size__ -> Tensor v'6 Data.ByteString.ByteString -- ^ __compression_type__ -> m' (Tensor Value Variant) -- ^ __handle__ fixedLengthRecordDatasetV2 = fixedLengthRecordDatasetV2' id fixedLengthRecordDatasetV2' :: forall v'1 v'2 v'3 v'4 v'5 v'6 m' . 
(MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __filenames__ -> Tensor v'2 Data.Int.Int64 -- ^ __header_bytes__ -> Tensor v'3 Data.Int.Int64 -- ^ __record_bytes__ -> Tensor v'4 Data.Int.Int64 -- ^ __footer_bytes__ -> Tensor v'5 Data.Int.Int64 -- ^ __buffer_size__ -> Tensor v'6 Data.ByteString.ByteString -- ^ __compression_type__ -> m' (Tensor Value Variant) -- ^ __handle__ fixedLengthRecordDatasetV2' op'options filenames header_bytes record_bytes footer_bytes buffer_size compression_type | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs filenames, buildInputs header_bytes, buildInputs record_bytes, buildInputs footer_bytes, buildInputs buffer_size, buildInputs compression_type] buildOp [] (opDef "FixedLengthRecordDatasetV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "filenames" type: DT_STRING } input_arg { name: "header_bytes" type: DT_INT64 } input_arg { name: "record_bytes" type: DT_INT64 } input_arg { name: "footer_bytes" type: DT_INT64 } input_arg { name: "buffer_size" type: DT_INT64 } input_arg { name: "compression_type" type: DT_STRING } output_arg { name: "handle" type: DT_VARIANT } -} -- | fixedLengthRecordReader :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __record_bytes__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__ fixedLengthRecordReader = fixedLengthRecordReader' id fixedLengthRecordReader' :: forall m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __record_bytes__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__ fixedLengthRecordReader' op'options record_bytes | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "FixedLengthRecordReader" & opAttr "record_bytes" .~ record_bytes & op'options & opInputs .~ op'inputs) {- output_arg { name: "reader_handle" type: DT_STRING is_ref: true } attr { name: "header_bytes" type: "int" default_value { i: 0 } } attr { name: "record_bytes" type: "int" } attr { name: "footer_bytes" type: "int" default_value { i: 0 } } attr { name: "hop_bytes" type: "int" default_value { i: 0 } } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | fixedLengthRecordReaderV2 :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __record_bytes__ -> m' (Tensor Value ResourceHandle) -- ^ __reader_handle__ fixedLengthRecordReaderV2 = fixedLengthRecordReaderV2' id fixedLengthRecordReaderV2' :: forall m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __record_bytes__ -> m' (Tensor Value ResourceHandle) -- ^ __reader_handle__ fixedLengthRecordReaderV2' op'options record_bytes | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "FixedLengthRecordReaderV2" & opAttr "record_bytes" .~ record_bytes & op'options & opInputs .~ op'inputs) {- output_arg { name: "reader_handle" type: DT_RESOURCE } attr { name: "header_bytes" type: "int" default_value { i: 0 } } attr { name: "record_bytes" type: "int" } attr { name: "footer_bytes" type: "int" default_value { i: 0 } } attr { name: "hop_bytes" type: "int" default_value { i: 0 } } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } attr { name: "encoding" type: "string" default_value { s: "" } } -} -- | fixedUnigramCandidateSampler :: forall v'1 m' . 
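{- Usage sketch (illustrative; not part of the generated bindings). For
   fixedLengthRecordReader, "record_bytes" is the only attribute without a
   default, so the generator lifts it into an explicit argument; header,
   footer and hop bytes go through OpParams. In some MonadBuild context, a
   reader for 16-byte records behind a 4-byte header might be built as:

   > reader <- fixedLengthRecordReader'
   >     (opAttr "header_bytes" .~ (4 :: Data.Int.Int64))
   >     16
-}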
(MonadBuild m') => Data.Int.Int64 -- ^ __num_sampled__ -> Data.Int.Int64 -- ^ __num_true__ -> Data.Int.Int64 -- ^ __range_max__ -> Bool -- ^ __unique__ -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__ -> m' ((Tensor Value Data.Int.Int64, Tensor Value Float, Tensor Value Float)) -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__) -- -- * __sampled_candidates__ -- -- * __true_expected_count__ -- -- * __sampled_expected_count__ fixedUnigramCandidateSampler = fixedUnigramCandidateSampler' id fixedUnigramCandidateSampler' :: forall v'1 m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_sampled__ -> Data.Int.Int64 -- ^ __num_true__ -> Data.Int.Int64 -- ^ __range_max__ -> Bool -- ^ __unique__ -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__ -> m' ((Tensor Value Data.Int.Int64, Tensor Value Float, Tensor Value Float)) -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__) -- -- * __sampled_candidates__ -- -- * __true_expected_count__ -- -- * __sampled_expected_count__ fixedUnigramCandidateSampler' op'options num_sampled num_true range_max unique true_classes | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs true_classes] buildOp [] (opDef "FixedUnigramCandidateSampler" & opAttr "num_sampled" .~ num_sampled & opAttr "num_true" .~ num_true & opAttr "range_max" .~ range_max & opAttr "unique" .~ unique & op'options & opInputs .~ op'inputs) {- input_arg { name: "true_classes" type: DT_INT64 } output_arg { name: "sampled_candidates" type: DT_INT64 } output_arg { name: "true_expected_count" type: DT_FLOAT } output_arg { name: "sampled_expected_count" type: DT_FLOAT } attr { name: "num_true" type: "int" has_minimum: true minimum: 1 } attr { name: "num_sampled" type: "int" has_minimum: true minimum: 1 } attr { name: "unique" type: "bool" } attr { name: "range_max" type: "int" has_minimum: true minimum: 1 } attr { name: "vocab_file" type: "string" default_value { s: "" } } attr { name: "distortion" type: "float" default_value { f: 1.0 } } attr { name: "num_reserved_ids" type: "int" default_value { i: 0 } } attr { name: "num_shards" type: "int" default_value { i: 1 } has_minimum: true minimum: 1 } attr { name: "shard" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "unigrams" type: "list(float)" default_value { list { } } } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } -} -- | floor :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ floor = floor' id floor' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ floor' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Floor" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | floorDiv :: forall v'1 v'2 t . 
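{- Note (illustrative; not part of the generated bindings). floorDiv and
   floorMod (below) round toward negative infinity, matching Haskell's
   div/mod rather than quot/rem, so for y /= 0 the results satisfy
   x == floorDiv x y * y + floorMod x y elementwise. For example, with
   x = -7 and y = 2: floorDiv yields -4, floorMod yields 1, and
   (-4) * 2 + 1 == -7. Assuming tensorflow-ops' TF.scalar:

   > import qualified TensorFlow.Ops as TF
   >
   > q :: Tensor Build Data.Int.Int32
   > q = floorDiv (TF.scalar (-7)) (TF.scalar 2)  -- evaluates to -4
-}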
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ floorDiv = floorDiv' id floorDiv' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ floorDiv' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "FloorDiv" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_UINT16 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | floorMod :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ floorMod = floorMod' id floorMod' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ floorMod' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "FloorMod" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | flushSummaryWriter :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __writer__ -> m' (ControlNode) flushSummaryWriter = flushSummaryWriter' id flushSummaryWriter' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __writer__ -> m' (ControlNode) flushSummaryWriter' op'options writer | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs writer] buildOp [] (opDef "FlushSummaryWriter" & op'options & opInputs .~ op'inputs) {- input_arg { name: "writer" type: DT_RESOURCE } -} -- | fractionalAvgPool :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => Tensor v'1 t -- ^ __value__ -> (Tensor Build t, Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int64) -- ^ (__output__, __row_pooling_sequence__, __col_pooling_sequence__) -- -- * __output__ -- -- * __row_pooling_sequence__ -- -- * __col_pooling_sequence__ fractionalAvgPool = fractionalAvgPool' id fractionalAvgPool' :: forall v'1 t . 
(OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __value__ -> (Tensor Build t, Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int64) -- ^ (__output__, __row_pooling_sequence__, __col_pooling_sequence__) -- -- * __output__ -- -- * __row_pooling_sequence__ -- -- * __col_pooling_sequence__ fractionalAvgPool' op'options value | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value] return (opDef "FractionalAvgPool" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "value" type_attr: "T" } output_arg { name: "output" type_attr: "T" } output_arg { name: "row_pooling_sequence" type: DT_INT64 } output_arg { name: "col_pooling_sequence" type: DT_INT64 } attr { name: "pooling_ratio" type: "list(float)" has_minimum: true minimum: 4 } attr { name: "pseudo_random" type: "bool" default_value { b: false } } attr { name: "overlapping" type: "bool" default_value { b: false } } attr { name: "deterministic" type: "bool" default_value { b: false } } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } -} -- | fractionalAvgPoolGrad :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => Tensor v'1 Data.Int.Int64 -- ^ __orig_input_tensor_shape__ -> Tensor v'2 t -- ^ __out_backprop__ -> Tensor v'3 Data.Int.Int64 -- ^ __row_pooling_sequence__ -> Tensor v'4 Data.Int.Int64 -- ^ __col_pooling_sequence__ -> Tensor Build t -- ^ __output__ fractionalAvgPoolGrad = fractionalAvgPoolGrad' id fractionalAvgPoolGrad' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __orig_input_tensor_shape__ -> Tensor v'2 t -- ^ __out_backprop__ -> Tensor v'3 Data.Int.Int64 -- ^ __row_pooling_sequence__ -> Tensor v'4 Data.Int.Int64 -- ^ __col_pooling_sequence__ -> Tensor Build t -- ^ __output__ fractionalAvgPoolGrad' op'options orig_input_tensor_shape out_backprop row_pooling_sequence col_pooling_sequence | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs orig_input_tensor_shape, buildInputs out_backprop, buildInputs row_pooling_sequence, buildInputs col_pooling_sequence] return (opDef "FractionalAvgPoolGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "orig_input_tensor_shape" type: DT_INT64 } input_arg { name: "out_backprop" type_attr: "T" } input_arg { name: "row_pooling_sequence" type: DT_INT64 } input_arg { name: "col_pooling_sequence" type: DT_INT64 } output_arg { name: "output" type_attr: "T" } attr { name: "overlapping" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } -} -- | fractionalMaxPool :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => Tensor v'1 t -- ^ __value__ -> (Tensor Build t, Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int64) -- ^ (__output__, __row_pooling_sequence__, __col_pooling_sequence__) -- -- * __output__ -- -- * __row_pooling_sequence__ -- -- * __col_pooling_sequence__ fractionalMaxPool = fractionalMaxPool' id fractionalMaxPool' :: forall v'1 t . 
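{- Note (illustrative; not part of the generated bindings). "pooling_ratio"
   is a required list(float) attribute (length >= 4, one entry per NHWC
   dimension, with the batch and channel entries conventionally 1.0) and has
   no default, so fractional pooling has to go through the primed variants,
   assuming an Attribute instance for [Float]:

   > pool :: Tensor v Float
   >      -> (Tensor Build Float, Tensor Build Data.Int.Int64,
   >          Tensor Build Data.Int.Int64)
   > pool value = fractionalAvgPool'
   >     (opAttr "pooling_ratio" .~ ([1.0, 1.44, 1.44, 1.0] :: [Float]))
   >     value
-}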
(OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __value__ -> (Tensor Build t, Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int64) -- ^ (__output__, __row_pooling_sequence__, __col_pooling_sequence__) -- -- * __output__ -- -- * __row_pooling_sequence__ -- -- * __col_pooling_sequence__ fractionalMaxPool' op'options value | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value] return (opDef "FractionalMaxPool" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "value" type_attr: "T" } output_arg { name: "output" type_attr: "T" } output_arg { name: "row_pooling_sequence" type: DT_INT64 } output_arg { name: "col_pooling_sequence" type: DT_INT64 } attr { name: "pooling_ratio" type: "list(float)" has_minimum: true minimum: 4 } attr { name: "pseudo_random" type: "bool" default_value { b: false } } attr { name: "overlapping" type: "bool" default_value { b: false } } attr { name: "deterministic" type: "bool" default_value { b: false } } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } -} -- | fractionalMaxPoolGrad :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => Tensor v'1 t -- ^ __orig_input__ -> Tensor v'2 t -- ^ __orig_output__ -> Tensor v'3 t -- ^ __out_backprop__ -> Tensor v'4 Data.Int.Int64 -- ^ __row_pooling_sequence__ -> Tensor v'5 Data.Int.Int64 -- ^ __col_pooling_sequence__ -> Tensor Build t -- ^ __output__ fractionalMaxPoolGrad = fractionalMaxPoolGrad' id fractionalMaxPoolGrad' :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __orig_input__ -> Tensor v'2 t -- ^ __orig_output__ -> Tensor v'3 t -- ^ __out_backprop__ -> Tensor v'4 Data.Int.Int64 -- ^ __row_pooling_sequence__ -> Tensor v'5 Data.Int.Int64 -- ^ __col_pooling_sequence__ -> Tensor Build t -- ^ __output__ fractionalMaxPoolGrad' op'options orig_input orig_output out_backprop row_pooling_sequence col_pooling_sequence | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs orig_input, buildInputs orig_output, buildInputs out_backprop, buildInputs row_pooling_sequence, buildInputs col_pooling_sequence] return (opDef "FractionalMaxPoolGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "orig_input" type_attr: "T" } input_arg { name: "orig_output" type_attr: "T" } input_arg { name: "out_backprop" type_attr: "T" } input_arg { name: "row_pooling_sequence" type: DT_INT64 } input_arg { name: "col_pooling_sequence" type: DT_INT64 } output_arg { name: "output" type_attr: "T" } attr { name: "overlapping" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } -} -- | fusedBatchNorm :: forall v'1 v'2 v'3 v'4 v'5 t . 
(OneOf '[Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __scale__ -> Tensor v'3 t -- ^ __offset__ -> Tensor v'4 t -- ^ __mean__ -> Tensor v'5 t -- ^ __variance__ -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) -- ^ (__y__, __batch_mean__, __batch_variance__, __reserve_space_1__, __reserve_space_2__) -- -- * __y__ -- -- * __batch_mean__ -- -- * __batch_variance__ -- -- * __reserve_space_1__ -- -- * __reserve_space_2__ fusedBatchNorm = fusedBatchNorm' id fusedBatchNorm' :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __scale__ -> Tensor v'3 t -- ^ __offset__ -> Tensor v'4 t -- ^ __mean__ -> Tensor v'5 t -- ^ __variance__ -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) -- ^ (__y__, __batch_mean__, __batch_variance__, __reserve_space_1__, __reserve_space_2__) -- -- * __y__ -- -- * __batch_mean__ -- -- * __batch_variance__ -- -- * __reserve_space_1__ -- -- * __reserve_space_2__ fusedBatchNorm' op'options x scale offset mean variance | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs scale, buildInputs offset, buildInputs mean, buildInputs variance] return (opDef "FusedBatchNorm" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "scale" type_attr: "T" } input_arg { name: "offset" type_attr: "T" } input_arg { name: "mean" type_attr: "T" } input_arg { name: "variance" type_attr: "T" } output_arg { name: "y" type_attr: "T" } output_arg { name: "batch_mean" type_attr: "T" } output_arg { name: "batch_variance" type_attr: "T" } output_arg { name: "reserve_space_1" type_attr: "T" } output_arg { name: "reserve_space_2" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT } } } attr { name: "epsilon" type: "float" default_value { f: 1.0e-4 } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "is_training" type: "bool" default_value { b: true } } -} -- | fusedBatchNormGrad :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[Float] t) => Tensor v'1 t -- ^ __y_backprop__ -> Tensor v'2 t -- ^ __x__ -> Tensor v'3 t -- ^ __scale__ -> Tensor v'4 t -- ^ __reserve_space_1__ -> Tensor v'5 t -- ^ __reserve_space_2__ -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) -- ^ (__x_backprop__, __scale_backprop__, __offset_backprop__, __reserve_space_3__, __reserve_space_4__) -- -- * __x_backprop__ -- -- * __scale_backprop__ -- -- * __offset_backprop__ -- -- * __reserve_space_3__ -- -- * __reserve_space_4__ fusedBatchNormGrad = fusedBatchNormGrad' id fusedBatchNormGrad' :: forall v'1 v'2 v'3 v'4 v'5 t . 
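{- Usage sketch (illustrative; not part of the generated bindings).
   fusedBatchNorm returns a 5-tuple: with is_training = true (the default)
   the mean/variance inputs go unused in favour of batch statistics, and the
   two reserve-space outputs are meant to be fed to fusedBatchNormGrad. With
   activations x and Float parameters in scope:

   > (y, batchMean, batchVar, res1, res2) =
   >     fusedBatchNorm' (opAttr "epsilon" .~ (1.0e-3 :: Float))
   >         x scale offset mean variance
-}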
(OneOf '[Float] t) => OpParams -> Tensor v'1 t -- ^ __y_backprop__ -> Tensor v'2 t -- ^ __x__ -> Tensor v'3 t -- ^ __scale__ -> Tensor v'4 t -- ^ __reserve_space_1__ -> Tensor v'5 t -- ^ __reserve_space_2__ -> (Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t, Tensor Build t) -- ^ (__x_backprop__, __scale_backprop__, __offset_backprop__, __reserve_space_3__, __reserve_space_4__) -- -- * __x_backprop__ -- -- * __scale_backprop__ -- -- * __offset_backprop__ -- -- * __reserve_space_3__ -- -- * __reserve_space_4__ fusedBatchNormGrad' op'options y_backprop x scale reserve_space_1 reserve_space_2 | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs y_backprop, buildInputs x, buildInputs scale, buildInputs reserve_space_1, buildInputs reserve_space_2] return (opDef "FusedBatchNormGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "y_backprop" type_attr: "T" } input_arg { name: "x" type_attr: "T" } input_arg { name: "scale" type_attr: "T" } input_arg { name: "reserve_space_1" type_attr: "T" } input_arg { name: "reserve_space_2" type_attr: "T" } output_arg { name: "x_backprop" type_attr: "T" } output_arg { name: "scale_backprop" type_attr: "T" } output_arg { name: "offset_backprop" type_attr: "T" } output_arg { name: "reserve_space_3" type_attr: "T" } output_arg { name: "reserve_space_4" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT } } } attr { name: "epsilon" type: "float" default_value { f: 1.0e-4 } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "is_training" type: "bool" default_value { b: true } } -} -- | fusedBatchNormGradV2 :: forall v'1 v'2 v'3 v'4 v'5 t u . (OneOf '[Data.Word.Word16, Float] t, OneOf '[Float] u) => Tensor v'1 t -- ^ __y_backprop__ -> Tensor v'2 t -- ^ __x__ -> Tensor v'3 Float -- ^ __scale__ -> Tensor v'4 u -- ^ __reserve_space_1__ -> Tensor v'5 u -- ^ __reserve_space_2__ -> (Tensor Build t, Tensor Build u, Tensor Build u, Tensor Build u, Tensor Build u) -- ^ (__x_backprop__, __scale_backprop__, __offset_backprop__, __reserve_space_3__, __reserve_space_4__) -- -- * __x_backprop__ -- -- * __scale_backprop__ -- -- * __offset_backprop__ -- -- * __reserve_space_3__ -- -- * __reserve_space_4__ fusedBatchNormGradV2 = fusedBatchNormGradV2' id fusedBatchNormGradV2' :: forall v'1 v'2 v'3 v'4 v'5 t u . 
(OneOf '[Data.Word.Word16, Float] t, OneOf '[Float] u) => OpParams -> Tensor v'1 t -- ^ __y_backprop__ -> Tensor v'2 t -- ^ __x__ -> Tensor v'3 Float -- ^ __scale__ -> Tensor v'4 u -- ^ __reserve_space_1__ -> Tensor v'5 u -- ^ __reserve_space_2__ -> (Tensor Build t, Tensor Build u, Tensor Build u, Tensor Build u, Tensor Build u) -- ^ (__x_backprop__, __scale_backprop__, __offset_backprop__, __reserve_space_3__, __reserve_space_4__) -- -- * __x_backprop__ -- -- * __scale_backprop__ -- -- * __offset_backprop__ -- -- * __reserve_space_3__ -- -- * __reserve_space_4__ fusedBatchNormGradV2' op'options y_backprop x scale reserve_space_1 reserve_space_2 | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs y_backprop, buildInputs x, buildInputs scale, buildInputs reserve_space_1, buildInputs reserve_space_2] return (opDef "FusedBatchNormGradV2" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "U" .~ tensorType (undefined :: u) & op'options & opInputs .~ op'inputs) {- input_arg { name: "y_backprop" type_attr: "T" } input_arg { name: "x" type_attr: "T" } input_arg { name: "scale" type: DT_FLOAT } input_arg { name: "reserve_space_1" type_attr: "U" } input_arg { name: "reserve_space_2" type_attr: "U" } output_arg { name: "x_backprop" type_attr: "T" } output_arg { name: "scale_backprop" type_attr: "U" } output_arg { name: "offset_backprop" type_attr: "U" } output_arg { name: "reserve_space_3" type_attr: "U" } output_arg { name: "reserve_space_4" type_attr: "U" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT } } } attr { name: "U" type: "type" allowed_values { list { type: DT_FLOAT } } } attr { name: "epsilon" type: "float" default_value { f: 1.0e-4 } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "is_training" type: "bool" default_value { b: true } } -} -- | fusedBatchNormGradV3 :: forall v'1 v'2 v'3 v'4 v'5 v'6 t u . (OneOf '[Data.Word.Word16, Float] t, OneOf '[Float] u) => Tensor v'1 t -- ^ __y_backprop__ -> Tensor v'2 t -- ^ __x__ -> Tensor v'3 Float -- ^ __scale__ -> Tensor v'4 u -- ^ __reserve_space_1__ -> Tensor v'5 u -- ^ __reserve_space_2__ -> Tensor v'6 u -- ^ __reserve_space_3__ -> (Tensor Build t, Tensor Build u, Tensor Build u, Tensor Build u, Tensor Build u) -- ^ (__x_backprop__, __scale_backprop__, __offset_backprop__, __reserve_space_4__, __reserve_space_5__) -- -- * __x_backprop__ -- -- * __scale_backprop__ -- -- * __offset_backprop__ -- -- * __reserve_space_4__ -- -- * __reserve_space_5__ fusedBatchNormGradV3 = fusedBatchNormGradV3' id fusedBatchNormGradV3' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t u . 
(OneOf '[Data.Word.Word16, Float] t, OneOf '[Float] u) => OpParams -> Tensor v'1 t -- ^ __y_backprop__ -> Tensor v'2 t -- ^ __x__ -> Tensor v'3 Float -- ^ __scale__ -> Tensor v'4 u -- ^ __reserve_space_1__ -> Tensor v'5 u -- ^ __reserve_space_2__ -> Tensor v'6 u -- ^ __reserve_space_3__ -> (Tensor Build t, Tensor Build u, Tensor Build u, Tensor Build u, Tensor Build u) -- ^ (__x_backprop__, __scale_backprop__, __offset_backprop__, __reserve_space_4__, __reserve_space_5__) -- -- * __x_backprop__ -- -- * __scale_backprop__ -- -- * __offset_backprop__ -- -- * __reserve_space_4__ -- -- * __reserve_space_5__ fusedBatchNormGradV3' op'options y_backprop x scale reserve_space_1 reserve_space_2 reserve_space_3 | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs y_backprop, buildInputs x, buildInputs scale, buildInputs reserve_space_1, buildInputs reserve_space_2, buildInputs reserve_space_3] return (opDef "FusedBatchNormGradV3" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "U" .~ tensorType (undefined :: u) & op'options & opInputs .~ op'inputs) {- input_arg { name: "y_backprop" type_attr: "T" } input_arg { name: "x" type_attr: "T" } input_arg { name: "scale" type: DT_FLOAT } input_arg { name: "reserve_space_1" type_attr: "U" } input_arg { name: "reserve_space_2" type_attr: "U" } input_arg { name: "reserve_space_3" type_attr: "U" } output_arg { name: "x_backprop" type_attr: "T" } output_arg { name: "scale_backprop" type_attr: "U" } output_arg { name: "offset_backprop" type_attr: "U" } output_arg { name: "reserve_space_4" type_attr: "U" } output_arg { name: "reserve_space_5" type_attr: "U" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT } } } attr { name: "U" type: "type" allowed_values { list { type: DT_FLOAT } } } attr { name: "epsilon" type: "float" default_value { f: 1.0e-4 } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "is_training" type: "bool" default_value { b: true } } -} -- | fusedBatchNormV2 :: forall v'1 v'2 v'3 v'4 v'5 t u . (OneOf '[Data.Word.Word16, Float] t, OneOf '[Float] u) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 u -- ^ __scale__ -> Tensor v'3 u -- ^ __offset__ -> Tensor v'4 u -- ^ __mean__ -> Tensor v'5 u -- ^ __variance__ -> (Tensor Build t, Tensor Build u, Tensor Build u, Tensor Build u, Tensor Build u) -- ^ (__y__, __batch_mean__, __batch_variance__, __reserve_space_1__, __reserve_space_2__) -- -- * __y__ -- -- * __batch_mean__ -- -- * __batch_variance__ -- -- * __reserve_space_1__ -- -- * __reserve_space_2__ fusedBatchNormV2 = fusedBatchNormV2' id fusedBatchNormV2' :: forall v'1 v'2 v'3 v'4 v'5 t u . 
(OneOf '[Data.Word.Word16, Float] t, OneOf '[Float] u) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 u -- ^ __scale__ -> Tensor v'3 u -- ^ __offset__ -> Tensor v'4 u -- ^ __mean__ -> Tensor v'5 u -- ^ __variance__ -> (Tensor Build t, Tensor Build u, Tensor Build u, Tensor Build u, Tensor Build u) -- ^ (__y__, __batch_mean__, __batch_variance__, __reserve_space_1__, __reserve_space_2__) -- -- * __y__ -- -- * __batch_mean__ -- -- * __batch_variance__ -- -- * __reserve_space_1__ -- -- * __reserve_space_2__ fusedBatchNormV2' op'options x scale offset mean variance | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs scale, buildInputs offset, buildInputs mean, buildInputs variance] return (opDef "FusedBatchNormV2" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "U" .~ tensorType (undefined :: u) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "scale" type_attr: "U" } input_arg { name: "offset" type_attr: "U" } input_arg { name: "mean" type_attr: "U" } input_arg { name: "variance" type_attr: "U" } output_arg { name: "y" type_attr: "T" } output_arg { name: "batch_mean" type_attr: "U" } output_arg { name: "batch_variance" type_attr: "U" } output_arg { name: "reserve_space_1" type_attr: "U" } output_arg { name: "reserve_space_2" type_attr: "U" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT } } } attr { name: "U" type: "type" allowed_values { list { type: DT_FLOAT } } } attr { name: "epsilon" type: "float" default_value { f: 1.0e-4 } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "is_training" type: "bool" default_value { b: true } } -} -- | fusedBatchNormV3 :: forall v'1 v'2 v'3 v'4 v'5 t u . (OneOf '[Data.Word.Word16, Float] t, OneOf '[Float] u) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 u -- ^ __scale__ -> Tensor v'3 u -- ^ __offset__ -> Tensor v'4 u -- ^ __mean__ -> Tensor v'5 u -- ^ __variance__ -> (Tensor Build t, Tensor Build u, Tensor Build u, Tensor Build u, Tensor Build u, Tensor Build u) -- ^ (__y__, __batch_mean__, __batch_variance__, __reserve_space_1__, __reserve_space_2__, __reserve_space_3__) -- -- * __y__ -- -- * __batch_mean__ -- -- * __batch_variance__ -- -- * __reserve_space_1__ -- -- * __reserve_space_2__ -- -- * __reserve_space_3__ fusedBatchNormV3 = fusedBatchNormV3' id fusedBatchNormV3' :: forall v'1 v'2 v'3 v'4 v'5 t u . 
(OneOf '[Data.Word.Word16, Float] t, OneOf '[Float] u) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 u -- ^ __scale__ -> Tensor v'3 u -- ^ __offset__ -> Tensor v'4 u -- ^ __mean__ -> Tensor v'5 u -- ^ __variance__ -> (Tensor Build t, Tensor Build u, Tensor Build u, Tensor Build u, Tensor Build u, Tensor Build u) -- ^ (__y__, __batch_mean__, __batch_variance__, __reserve_space_1__, __reserve_space_2__, __reserve_space_3__) -- -- * __y__ -- -- * __batch_mean__ -- -- * __batch_variance__ -- -- * __reserve_space_1__ -- -- * __reserve_space_2__ -- -- * __reserve_space_3__ fusedBatchNormV3' op'options x scale offset mean variance | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs scale, buildInputs offset, buildInputs mean, buildInputs variance] return (opDef "FusedBatchNormV3" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "U" .~ tensorType (undefined :: u) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "scale" type_attr: "U" } input_arg { name: "offset" type_attr: "U" } input_arg { name: "mean" type_attr: "U" } input_arg { name: "variance" type_attr: "U" } output_arg { name: "y" type_attr: "T" } output_arg { name: "batch_mean" type_attr: "U" } output_arg { name: "batch_variance" type_attr: "U" } output_arg { name: "reserve_space_1" type_attr: "U" } output_arg { name: "reserve_space_2" type_attr: "U" } output_arg { name: "reserve_space_3" type_attr: "U" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT } } } attr { name: "U" type: "type" allowed_values { list { type: DT_FLOAT } } } attr { name: "epsilon" type: "float" default_value { f: 1.0e-4 } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "is_training" type: "bool" default_value { b: true } } -} -- | fusedPadConv2D :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __paddings__ -> Tensor v'3 t -- ^ __filter__ -> Tensor Build t -- ^ __output__ fusedPadConv2D = fusedPadConv2D' id fusedPadConv2D' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __paddings__ -> Tensor v'3 t -- ^ __filter__ -> Tensor Build t -- ^ __output__ fusedPadConv2D' op'options input paddings filter | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs paddings, buildInputs filter] return (opDef "FusedPadConv2D" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "paddings" type: DT_INT32 } input_arg { name: "filter" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "mode" type: "string" allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } } } attr { name: "strides" type: "list(int)" } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } -} -- | fusedResizeAndPadConv2D :: forall v'1 v'2 v'3 v'4 t . 
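-- A minimal usage sketch for 'fusedBatchNormV3' above: the reserve-space
-- outputs exist only to be passed through to 'fusedBatchNormGradV3' on the
-- backward pass, and attributes such as epsilon (default 1.0e-4) and
-- data_format (default "NHWC") are overridden through the primed variant's
-- OpParams. With x, scale, offset, mean, variance, and dy assumed in scope:
--
-- > let (y, batchMean, batchVar, r1, r2, r3) =
-- >       fusedBatchNormV3' (opAttr "epsilon" .~ (1.0e-3 :: Float)) x scale offset mean variance
-- >     (dx, dscale, doffset, _, _) =
-- >       fusedBatchNormGradV3' (opAttr "epsilon" .~ (1.0e-3 :: Float)) dy x scale r1 r2 r3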
(OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __size__ -> Tensor v'3 Data.Int.Int32 -- ^ __paddings__ -> Tensor v'4 t -- ^ __filter__ -> Tensor Build t -- ^ __output__ fusedResizeAndPadConv2D = fusedResizeAndPadConv2D' id fusedResizeAndPadConv2D' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __size__ -> Tensor v'3 Data.Int.Int32 -- ^ __paddings__ -> Tensor v'4 t -- ^ __filter__ -> Tensor Build t -- ^ __output__ fusedResizeAndPadConv2D' op'options input size paddings filter | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs size, buildInputs paddings, buildInputs filter] return (opDef "FusedResizeAndPadConv2D" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "size" type: DT_INT32 } input_arg { name: "paddings" type: DT_INT32 } input_arg { name: "filter" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "resize_align_corners" type: "bool" default_value { b: false } } attr { name: "mode" type: "string" allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } } } attr { name: "strides" type: "list(int)" } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } -} -- | gather :: forall v'1 v'2 tparams tindices . (TensorType tparams, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 tparams -- ^ __params__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor Build tparams -- ^ __output__ gather = gather' id gather' :: forall v'1 v'2 tparams tindices . (TensorType tparams, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 tparams -- ^ __params__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor Build tparams -- ^ __output__ gather' op'options params indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs params, buildInputs indices] return (opDef "Gather" & opAttr "Tparams" .~ tensorType (undefined :: tparams) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "params" type_attr: "Tparams" } input_arg { name: "indices" type_attr: "Tindices" } output_arg { name: "output" type_attr: "Tparams" } attr { name: "validate_indices" type: "bool" default_value { b: true } } attr { name: "Tparams" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | gatherNd :: forall v'1 v'2 tparams tindices . (TensorType tparams, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 tparams -- ^ __params__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor Build tparams -- ^ __output__ gatherNd = gatherNd' id gatherNd' :: forall v'1 v'2 tparams tindices . 
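-- A minimal usage sketch for 'gather' above: the indices select rows of
-- params, so the result here is [30, 10]. 'constant' is assumed to come
-- from TensorFlow.Ops (tensorflow-ops package):
--
-- > gathered :: Tensor Build Float
-- > gathered = gather (constant (Shape [4]) [10, 20, 30, 40 :: Float])
-- >                   (constant (Shape [2]) [2, 0 :: Data.Int.Int32])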
(TensorType tparams, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 tparams -- ^ __params__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor Build tparams -- ^ __output__ gatherNd' op'options params indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs params, buildInputs indices] return (opDef "GatherNd" & opAttr "Tparams" .~ tensorType (undefined :: tparams) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "params" type_attr: "Tparams" } input_arg { name: "indices" type_attr: "Tindices" } output_arg { name: "output" type_attr: "Tparams" } attr { name: "Tparams" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | gatherV2 :: forall v'1 v'2 v'3 tparams tindices taxis . (TensorType tparams, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices, OneOf '[Data.Int.Int32, Data.Int.Int64] taxis) => Tensor v'1 tparams -- ^ __params__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 taxis -- ^ __axis__ -> Tensor Build tparams -- ^ __output__ gatherV2 = gatherV2' id gatherV2' :: forall v'1 v'2 v'3 tparams tindices taxis . (TensorType tparams, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices, OneOf '[Data.Int.Int32, Data.Int.Int64] taxis) => OpParams -> Tensor v'1 tparams -- ^ __params__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 taxis -- ^ __axis__ -> Tensor Build tparams -- ^ __output__ gatherV2' op'options params indices axis | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs params, buildInputs indices, buildInputs axis] return (opDef "GatherV2" & opAttr "Tparams" .~ tensorType (undefined :: tparams) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & opAttr "Taxis" .~ tensorType (undefined :: taxis) & op'options & opInputs .~ op'inputs) {- input_arg { name: "params" type_attr: "Tparams" } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "axis" type_attr: "Taxis" } output_arg { name: "output" type_attr: "Tparams" } attr { name: "batch_dims" type: "int" default_value { i: 0 } } attr { name: "Tparams" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "Taxis" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Re-configures the GCS block cache with the new configuration values. -- -- If the values are the same as the already configured values, this op is a no-op. If -- they are different, the current contents of the block cache are dropped, and a -- new block cache is created fresh. gcsConfigureBlockCache :: forall v'1 v'2 v'3 m' . (MonadBuild m') => Tensor v'1 Data.Word.Word64 -- ^ __max_cache_size__ -> Tensor v'2 Data.Word.Word64 -- ^ __block_size__ -> Tensor v'3 Data.Word.Word64 -- ^ __max_staleness__ -> m' (ControlNode) gcsConfigureBlockCache = gcsConfigureBlockCache' id gcsConfigureBlockCache' :: forall v'1 v'2 v'3 m' . 
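-- A minimal usage sketch for 'gatherV2' above, which generalizes 'gather'
-- with an explicit axis input (plus a batch_dims attribute defaulting to 0).
-- Gathering along axis 1 of a 2x3 matrix selects columns; with 'constant'
-- assumed from TensorFlow.Ops, this yields [[3], [6]]:
--
-- > cols :: Tensor Build Float
-- > cols = gatherV2 (constant (Shape [2, 3]) [1, 2, 3, 4, 5, 6 :: Float])
-- >                 (constant (Shape [1]) [2 :: Data.Int.Int32])
-- >                 (constant (Shape []) [1 :: Data.Int.Int32])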
(MonadBuild m') => OpParams -> Tensor v'1 Data.Word.Word64 -- ^ __max_cache_size__ -> Tensor v'2 Data.Word.Word64 -- ^ __block_size__ -> Tensor v'3 Data.Word.Word64 -- ^ __max_staleness__ -> m' (ControlNode) gcsConfigureBlockCache' op'options max_cache_size block_size max_staleness | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs max_cache_size, buildInputs block_size, buildInputs max_staleness] buildOp [] (opDef "GcsConfigureBlockCache" & op'options & opInputs .~ op'inputs) {- input_arg { name: "max_cache_size" type: DT_UINT64 } input_arg { name: "block_size" type: DT_UINT64 } input_arg { name: "max_staleness" type: DT_UINT64 } -} -- | Configures the credentials used by the GCS client of the local TF runtime. -- -- The JSON input can be in one of the following formats: -- -- 1. Refresh Token: -- { -- "client_id": "<redacted>", -- "client_secret": "<redacted>", -- "refresh_token": "<redacted>", -- "type": "authorized_user", -- } -- -- 2. Service Account: -- { -- "type": "service_account", -- "project_id": "<redacted>", -- "private_key_id": "<redacted>", -- "private_key": "-----BEGIN PRIVATE KEY-----\n<redacted>\n-----END PRIVATE KEY-----\n", -- "client_email": "<redacted>@<redacted>.iam.gserviceaccount.com", -- "client_id": "<redacted>", -- # Some additional fields elided -- } -- -- Note: credentials established through this method are shared across all -- sessions run on this runtime. -- -- Note: be sure to feed the inputs to this op to ensure the credentials are not -- stored in a constant op within the graph that might accidentally be checkpointed -- or otherwise persisted or exfiltrated. gcsConfigureCredentials :: forall v'1 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __json__ -> m' (ControlNode) gcsConfigureCredentials = gcsConfigureCredentials' id gcsConfigureCredentials' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __json__ -> m' (ControlNode) gcsConfigureCredentials' op'options json | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs json] buildOp [] (opDef "GcsConfigureCredentials" & op'options & opInputs .~ op'inputs) {- input_arg { name: "json" type: DT_STRING } -} -- | Generates serialized partition messages suitable for batch reads. -- -- This op should not be used directly by clients. Instead, the -- bigquery_reader_ops.py file defines a clean interface to the reader. generateBigQueryReaderPartitions :: Data.Int.Int64 -- ^ __num_partitions__: Number of partitions to split the table into. -> Data.Int.Int64 -- ^ __timestamp_millis__: Table snapshot timestamp in millis since epoch. Relative -- (negative or zero) snapshot times are not allowed. For more details, see -- 'Table Decorators' in BigQuery docs. -> Tensor Build Data.ByteString.ByteString -- ^ __partitions__: Serialized table partitions. generateBigQueryReaderPartitions = generateBigQueryReaderPartitions' id generateBigQueryReaderPartitions' :: OpParams -> Data.Int.Int64 -- ^ __num_partitions__: Number of partitions to split the table into. -> Data.Int.Int64 -- ^ __timestamp_millis__: Table snapshot timestamp in millis since epoch. Relative -- (negative or zero) snapshot times are not allowed. For more details, see -- 'Table Decorators' in BigQuery docs. -> Tensor Build Data.ByteString.ByteString -- ^ __partitions__: Serialized table partitions. 
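-- A minimal sketch of the feeding pattern recommended for
-- 'gcsConfigureCredentials' above, so the JSON never becomes a graph
-- constant. It assumes 'placeholder' from TensorFlow.Ops, the session API
-- from TensorFlow.Session (runSession, runWithFeeds_, feed),
-- 'encodeTensorData' from TensorFlow.Types, and V as Data.Vector:
--
-- > configureCreds :: Data.ByteString.ByteString -> IO ()
-- > configureCreds credsJson = runSession $ do
-- >   jsonIn <- placeholder (Shape [])
-- >   op <- gcsConfigureCredentials jsonIn
-- >   runWithFeeds_ [feed jsonIn (encodeTensorData (Shape []) (V.singleton credsJson))] op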
generateBigQueryReaderPartitions' op'options num_partitions timestamp_millis | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] return (opDef "GenerateBigQueryReaderPartitions" & opAttr "num_partitions" .~ num_partitions & opAttr "timestamp_millis" .~ timestamp_millis & op'options & opInputs .~ op'inputs) {- output_arg { name: "partitions" description: "Serialized table partitions." type: DT_STRING } attr { name: "project_id" type: "string" description: "GCP project ID." } attr { name: "dataset_id" type: "string" description: "BigQuery Dataset ID." } attr { name: "table_id" type: "string" description: "Table to read." } attr { name: "columns" type: "list(string)" description: "List of columns to read. Leave empty to read all columns." } attr { name: "timestamp_millis" type: "int" description: "Table snapshot timestamp in millis since epoch. Relative\n(negative or zero) snapshot times are not allowed. For more details, see\n\'Table Decorators\' in BigQuery docs." } attr { name: "num_partitions" type: "int" description: "Number of partitions to split the table into." } attr { name: "test_end_point" type: "string" default_value { s: "" } description: "Do not use. For testing purposes only." } -} -- | generateVocabRemapping :: Data.Int.Int64 -- ^ __new_vocab_offset__ -> Data.Int.Int64 -- ^ __num_new_vocab__ -> Tensor v'1 Data.ByteString.ByteString -- ^ __new_vocab_file__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __old_vocab_file__ -> (Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int32) -- ^ (__remapping__, __num_present__) -- -- * __remapping__ -- -- * __num_present__ generateVocabRemapping = generateVocabRemapping' id generateVocabRemapping' :: OpParams -> Data.Int.Int64 -- ^ __new_vocab_offset__ -> Data.Int.Int64 -- ^ __num_new_vocab__ -> Tensor v'1 Data.ByteString.ByteString -- ^ __new_vocab_file__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __old_vocab_file__ -> (Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int32) -- ^ (__remapping__, __num_present__) -- -- * __remapping__ -- -- * __num_present__ generateVocabRemapping' op'options new_vocab_offset num_new_vocab new_vocab_file old_vocab_file | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs new_vocab_file, buildInputs old_vocab_file] return (opDef "GenerateVocabRemapping" & opAttr "new_vocab_offset" .~ new_vocab_offset & opAttr "num_new_vocab" .~ num_new_vocab & op'options & opInputs .~ op'inputs) {- input_arg { name: "new_vocab_file" type: DT_STRING } input_arg { name: "old_vocab_file" type: DT_STRING } output_arg { name: "remapping" type: DT_INT64 } output_arg { name: "num_present" type: DT_INT32 } attr { name: "new_vocab_offset" type: "int" has_minimum: true } attr { name: "num_new_vocab" type: "int" has_minimum: true } attr { name: "old_vocab_size" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } -} -- | getSessionHandle :: forall v'1 t m' . (MonadBuild m', TensorType t) => Tensor v'1 t -- ^ __value__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __handle__ getSessionHandle = getSessionHandle' id getSessionHandle' :: forall v'1 t m' . 
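-- 'generateVocabRemapping' above lines up a new vocabulary file against an
-- old one: __remapping__ holds, for each of __num_new_vocab__ entries
-- starting at __new_vocab_offset__, the old ID of that token (-1 when the
-- token is not in the old vocabulary), and __num_present__ counts how many
-- were found. A sketch with hypothetical file paths, assuming 'constant'
-- from TensorFlow.Ops:
--
-- > let (remapping, numPresent) = generateVocabRemapping 0 100
-- >       (constant (Shape []) ["new_vocab.txt" :: Data.ByteString.ByteString])
-- >       (constant (Shape []) ["old_vocab.txt" :: Data.ByteString.ByteString])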
(MonadBuild m', TensorType t) => OpParams -> Tensor v'1 t -- ^ __value__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __handle__ getSessionHandle' op'options value | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value] buildOp [] (opDef "GetSessionHandle" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "value" type_attr: "T" } output_arg { name: "handle" type: DT_STRING } attr { name: "T" type: "type" } -} -- | getSessionHandleV2 :: forall v'1 t m' . (MonadBuild m', TensorType t) => Tensor v'1 t -- ^ __value__ -> m' (Tensor Value ResourceHandle) -- ^ __handle__ getSessionHandleV2 = getSessionHandleV2' id getSessionHandleV2' :: forall v'1 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 t -- ^ __value__ -> m' (Tensor Value ResourceHandle) -- ^ __handle__ getSessionHandleV2' op'options value | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value] buildOp [] (opDef "GetSessionHandleV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "value" type_attr: "T" } output_arg { name: "handle" type: DT_RESOURCE } attr { name: "T" type: "type" } -} -- | getSessionTensor :: forall v'1 dtype m' . (MonadBuild m', TensorType dtype) => Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> m' (Tensor Value dtype) -- ^ __value__ getSessionTensor = getSessionTensor' id getSessionTensor' :: forall v'1 dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> m' (Tensor Value dtype) -- ^ __value__ getSessionTensor' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "GetSessionTensor" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING } output_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" } -} -- | greater :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ greater = greater' id greater' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ greater' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "Greater" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | greaterEqual :: forall v'1 v'2 t . 
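-- A minimal usage sketch for 'greater' above: an elementwise, broadcasting
-- comparison producing a Bool tensor. Assuming 'constant' from
-- TensorFlow.Ops, this evaluates to [False, True, True]:
--
-- > mask :: Tensor Build Bool
-- > mask = greater (constant (Shape [3]) [1, 5, 3 :: Float])
-- >                (constant (Shape [3]) [2, 2, 2 :: Float])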
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ greaterEqual = greaterEqual' id greaterEqual' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ greaterEqual' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "GreaterEqual" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | guaranteeConst :: forall v'1 t m' . (MonadBuild m', TensorType t) => Tensor v'1 t -- ^ __input__ -> m' (Tensor Value t) -- ^ __output__ guaranteeConst = guaranteeConst' id guaranteeConst' :: forall v'1 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__ -> m' (Tensor Value t) -- ^ __output__ guaranteeConst' op'options input | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "GuaranteeConst" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } -} -- | hSVToRGB :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __images__ -> Tensor Build t -- ^ __output__ hSVToRGB = hSVToRGB' id hSVToRGB' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __images__ -> Tensor Build t -- ^ __output__ hSVToRGB' op'options images | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images] return (opDef "HSVToRGB" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "images" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | hashTable :: forall m' . (MonadBuild m') => DataType -- ^ __key_dtype__ -> DataType -- ^ __value_dtype__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __table_handle__ hashTable = hashTable' id hashTable' :: forall m' . 
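-- A minimal usage sketch for 'hSVToRGB' above: the innermost dimension holds
-- (hue, saturation, value) triples in [0, 1]. Pure red, i.e. HSV (0, 1, 1),
-- maps to RGB (1, 0, 0). Assuming 'constant' from TensorFlow.Ops:
--
-- > red :: Tensor Build Float
-- > red = hSVToRGB (constant (Shape [1, 1, 3]) [0, 1, 1 :: Float])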
(MonadBuild m') => OpParams -> DataType -- ^ __key_dtype__ -> DataType -- ^ __value_dtype__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __table_handle__ hashTable' op'options key_dtype value_dtype | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "HashTable" & opAttr "key_dtype" .~ key_dtype & opAttr "value_dtype" .~ value_dtype & op'options & opInputs .~ op'inputs) {- output_arg { name: "table_handle" type: DT_STRING is_ref: true } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } attr { name: "use_node_name_sharing" type: "bool" default_value { b: false } } attr { name: "key_dtype" type: "type" } attr { name: "value_dtype" type: "type" } -} -- | hashTableV2 :: forall m' . (MonadBuild m') => DataType -- ^ __key_dtype__ -> DataType -- ^ __value_dtype__ -> m' (Tensor Value ResourceHandle) -- ^ __table_handle__ hashTableV2 = hashTableV2' id hashTableV2' :: forall m' . (MonadBuild m') => OpParams -> DataType -- ^ __key_dtype__ -> DataType -- ^ __value_dtype__ -> m' (Tensor Value ResourceHandle) -- ^ __table_handle__ hashTableV2' op'options key_dtype value_dtype | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "HashTableV2" & opAttr "key_dtype" .~ key_dtype & opAttr "value_dtype" .~ value_dtype & op'options & opInputs .~ op'inputs) {- output_arg { name: "table_handle" type: DT_RESOURCE } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } attr { name: "use_node_name_sharing" type: "bool" default_value { b: false } } attr { name: "key_dtype" type: "type" } attr { name: "value_dtype" type: "type" } -} -- | histogramFixedWidth :: forall v'1 v'2 v'3 t dtype . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] dtype) => Tensor v'1 t -- ^ __values__ -> Tensor v'2 t -- ^ __value_range__ -> Tensor v'3 Data.Int.Int32 -- ^ __nbins__ -> Tensor Build dtype -- ^ __out__ histogramFixedWidth = histogramFixedWidth' id histogramFixedWidth' :: forall v'1 v'2 v'3 t dtype . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] dtype) => OpParams -> Tensor v'1 t -- ^ __values__ -> Tensor v'2 t -- ^ __value_range__ -> Tensor v'3 Data.Int.Int32 -- ^ __nbins__ -> Tensor Build dtype -- ^ __out__ histogramFixedWidth' op'options values value_range nbins | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs values, buildInputs value_range, buildInputs nbins] return (opDef "HistogramFixedWidth" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "values" type_attr: "T" } input_arg { name: "value_range" type_attr: "T" } input_arg { name: "nbins" type: DT_INT32 } output_arg { name: "out" type_attr: "dtype" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "dtype" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | histogramSummary :: forall v'1 v'2 t . 
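-- A minimal usage sketch for 'histogramFixedWidth' above: __nbins__
-- equal-width bins span __value_range__, and out-of-range values are clamped
-- into the first or last bin, so this evaluates to [3, 2]. Assuming
-- 'constant' from TensorFlow.Ops:
--
-- > counts :: Tensor Build Data.Int.Int32
-- > counts = histogramFixedWidth
-- >   (constant (Shape [5]) [-1, 0.1, 0.4, 0.6, 5 :: Float])
-- >   (constant (Shape [2]) [0, 1 :: Float])
-- >   (constant (Shape []) [2 :: Data.Int.Int32])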
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 Data.ByteString.ByteString -- ^ __tag__ -> Tensor v'2 t -- ^ __values__ -> Tensor Build Data.ByteString.ByteString -- ^ __summary__ histogramSummary = histogramSummary' id histogramSummary' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __tag__ -> Tensor v'2 t -- ^ __values__ -> Tensor Build Data.ByteString.ByteString -- ^ __summary__ histogramSummary' op'options tag values | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tag, buildInputs values] return (opDef "HistogramSummary" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tag" type: DT_STRING } input_arg { name: "values" type_attr: "T" } output_arg { name: "summary" type: DT_STRING } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | hostConst :: forall dtype . (TensorType dtype) => Tensor Build dtype -- ^ __output__ hostConst = hostConst' id hostConst' :: forall dtype . (TensorType dtype) => OpParams -> Tensor Build dtype -- ^ __output__ hostConst' op'options | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] return (opDef "HostConst" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- output_arg { name: "output" type_attr: "dtype" } attr { name: "value" type: "tensor" } attr { name: "dtype" type: "type" } -} -- | iFFT :: forall v'1 tcomplex . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] tcomplex) => Tensor v'1 tcomplex -- ^ __input__ -> Tensor Build tcomplex -- ^ __output__ iFFT = iFFT' id iFFT' :: forall v'1 tcomplex . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] tcomplex) => OpParams -> Tensor v'1 tcomplex -- ^ __input__ -> Tensor Build tcomplex -- ^ __output__ iFFT' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "IFFT" & opAttr "Tcomplex" .~ tensorType (undefined :: tcomplex) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tcomplex" } output_arg { name: "output" type_attr: "Tcomplex" } attr { name: "Tcomplex" type: "type" default_value { type: DT_COMPLEX64 } allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | iFFT2D :: forall v'1 tcomplex . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] tcomplex) => Tensor v'1 tcomplex -- ^ __input__ -> Tensor Build tcomplex -- ^ __output__ iFFT2D = iFFT2D' id iFFT2D' :: forall v'1 tcomplex . 
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] tcomplex) => OpParams -> Tensor v'1 tcomplex -- ^ __input__ -> Tensor Build tcomplex -- ^ __output__ iFFT2D' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "IFFT2D" & opAttr "Tcomplex" .~ tensorType (undefined :: tcomplex) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tcomplex" } output_arg { name: "output" type_attr: "Tcomplex" } attr { name: "Tcomplex" type: "type" default_value { type: DT_COMPLEX64 } allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | iFFT3D :: forall v'1 tcomplex . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] tcomplex) => Tensor v'1 tcomplex -- ^ __input__ -> Tensor Build tcomplex -- ^ __output__ iFFT3D = iFFT3D' id iFFT3D' :: forall v'1 tcomplex . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] tcomplex) => OpParams -> Tensor v'1 tcomplex -- ^ __input__ -> Tensor Build tcomplex -- ^ __output__ iFFT3D' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "IFFT3D" & opAttr "Tcomplex" .~ tensorType (undefined :: tcomplex) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tcomplex" } output_arg { name: "output" type_attr: "Tcomplex" } attr { name: "Tcomplex" type: "type" default_value { type: DT_COMPLEX64 } allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | iRFFT :: Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__ -> Tensor Build Float -- ^ __output__ iRFFT = iRFFT' id iRFFT' :: OpParams -> Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__ -> Tensor Build Float -- ^ __output__ iRFFT' op'options input fft_length | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs fft_length] return (opDef "IRFFT" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_COMPLEX64 } input_arg { name: "fft_length" type: DT_INT32 } output_arg { name: "output" type: DT_FLOAT } -} -- | iRFFT2D :: Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__ -> Tensor Build Float -- ^ __output__ iRFFT2D = iRFFT2D' id iRFFT2D' :: OpParams -> Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__ -> Tensor Build Float -- ^ __output__ iRFFT2D' op'options input fft_length | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs fft_length] return (opDef "IRFFT2D" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_COMPLEX64 } input_arg { name: "fft_length" type: DT_INT32 } output_arg { name: "output" type: DT_FLOAT } -} -- | iRFFT3D :: Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__ -> Tensor Build Float -- ^ __output__ iRFFT3D = iRFFT3D' id iRFFT3D' :: OpParams -> Tensor v'1 (Data.Complex.Complex Float) -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__ -> Tensor Build Float -- ^ __output__ iRFFT3D' op'options input fft_length | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs fft_length] return 
(opDef "IRFFT3D" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_COMPLEX64 } input_arg { name: "fft_length" type: DT_INT32 } output_arg { name: "output" type: DT_FLOAT } -} -- | identity :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ identity = identity' id identity' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ identity' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Identity" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } -} -- | identityN :: forall v'1 t . (TensorTypes t) => TensorList (v'1) t -- ^ __input__ -> TensorList (Build) t -- ^ __output__ identityN = identityN' id identityN' :: forall v'1 t . (TensorTypes t) => OpParams -> TensorList (v'1) t -- ^ __input__ -> TensorList (Build) t -- ^ __output__ identityN' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "IdentityN" & opAttr "T" .~ fromTensorTypes (Proxy :: Proxy t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_list_attr: "T" } output_arg { name: "output" type_list_attr: "T" } attr { name: "T" type: "list(type)" has_minimum: true minimum: 1 } -} -- | identityReader :: forall m' . (MonadBuild m') => m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__ identityReader = identityReader' id identityReader' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__ identityReader' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "IdentityReader" & op'options & opInputs .~ op'inputs) {- output_arg { name: "reader_handle" type: DT_STRING is_ref: true } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | identityReaderV2 :: forall m' . (MonadBuild m') => m' (Tensor Value ResourceHandle) -- ^ __reader_handle__ identityReaderV2 = identityReaderV2' id identityReaderV2' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Value ResourceHandle) -- ^ __reader_handle__ identityReaderV2' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "IdentityReaderV2" & op'options & opInputs .~ op'inputs) {- output_arg { name: "reader_handle" type: DT_RESOURCE } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | igamma :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __x__ -> Tensor Build t -- ^ __z__ igamma = igamma' id igamma' :: forall v'1 v'2 t . 
(OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __x__ -> Tensor Build t -- ^ __z__ igamma' op'options a x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a, buildInputs x] return (opDef "Igamma" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a" type_attr: "T" } input_arg { name: "x" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | igammaGradA :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __x__ -> Tensor Build t -- ^ __z__ igammaGradA = igammaGradA' id igammaGradA' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __x__ -> Tensor Build t -- ^ __z__ igammaGradA' op'options a x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a, buildInputs x] return (opDef "IgammaGradA" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a" type_attr: "T" } input_arg { name: "x" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | igammac :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __x__ -> Tensor Build t -- ^ __z__ igammac = igammac' id igammac' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __x__ -> Tensor Build t -- ^ __z__ igammac' op'options a x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a, buildInputs x] return (opDef "Igammac" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a" type_attr: "T" } input_arg { name: "x" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | imag :: forall v'1 t tout . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] t, OneOf '[Double, Float] tout) => Tensor v'1 t -- ^ __input__ -> Tensor Build tout -- ^ __output__ imag = imag' id imag' :: forall v'1 t tout . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] t, OneOf '[Double, Float] tout) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build tout -- ^ __output__ imag' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Imag" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tout" .~ tensorType (undefined :: tout) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "Tout" } attr { name: "T" type: "type" default_value { type: DT_COMPLEX64 } allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } } } attr { name: "Tout" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | imageSummary :: forall v'1 v'2 t . 
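-- A minimal usage sketch for 'imag' above, extracting the imaginary part of
-- a complex tensor (__Tout__ is Float for Complex Float inputs). Assuming
-- 'constant' from TensorFlow.Ops and ':+' from Data.Complex, this evaluates
-- to [2, 4]:
--
-- > im :: Tensor Build Float
-- > im = imag (constant (Shape [2]) [1 :+ 2, 3 :+ 4 :: Data.Complex.Complex Float])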
(OneOf '[Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 Data.ByteString.ByteString -- ^ __tag__ -> Tensor v'2 t -- ^ __tensor__ -> Tensor Build Data.ByteString.ByteString -- ^ __summary__ imageSummary = imageSummary' id imageSummary' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __tag__ -> Tensor v'2 t -- ^ __tensor__ -> Tensor Build Data.ByteString.ByteString -- ^ __summary__ imageSummary' op'options tag tensor | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tag, buildInputs tensor] return (opDef "ImageSummary" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tag" type: DT_STRING } input_arg { name: "tensor" type_attr: "T" } output_arg { name: "summary" type: DT_STRING } attr { name: "max_images" type: "int" default_value { i: 3 } has_minimum: true minimum: 1 } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_UINT8 type: DT_FLOAT type: DT_HALF type: DT_DOUBLE } } } attr { name: "bad_color" type: "tensor" default_value { tensor { dtype: DT_UINT8 tensor_shape { dim { size: 4 } } int_val: 255 int_val: 0 int_val: 0 int_val: 255 } } } -} -- | immutableConst :: forall dtype . (TensorType dtype) => Shape -- ^ __shape__ -> Tensor Build dtype -- ^ __tensor__ immutableConst = immutableConst' id immutableConst' :: forall dtype . (TensorType dtype) => OpParams -> Shape -- ^ __shape__ -> Tensor Build dtype -- ^ __tensor__ immutableConst' op'options shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] return (opDef "ImmutableConst" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- output_arg { name: "tensor" type_attr: "dtype" } attr { name: "dtype" type: "type" } attr { name: "shape" type: "shape" } attr { name: "memory_region_name" type: "string" } -} -- | importEvent :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __writer__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __event__ -> m' (ControlNode) importEvent = importEvent' id importEvent' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __writer__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __event__ -> m' (ControlNode) importEvent' op'options writer event | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs writer, buildInputs event] buildOp [] (opDef "ImportEvent" & op'options & opInputs .~ op'inputs) {- input_arg { name: "writer" type: DT_RESOURCE } input_arg { name: "event" type: DT_STRING } -} -- | inTopK :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Data.Int.Int64 -- ^ __k__ -> Tensor v'1 Float -- ^ __predictions__ -> Tensor v'2 t -- ^ __targets__ -> Tensor Build Bool -- ^ __precision__ inTopK = inTopK' id inTopK' :: forall v'1 v'2 t . 
(OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Data.Int.Int64 -- ^ __k__ -> Tensor v'1 Float -- ^ __predictions__ -> Tensor v'2 t -- ^ __targets__ -> Tensor Build Bool -- ^ __precision__ inTopK' op'options k predictions targets | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs predictions, buildInputs targets] return (opDef "InTopK" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "k" .~ k & op'options & opInputs .~ op'inputs) {- input_arg { name: "predictions" type: DT_FLOAT } input_arg { name: "targets" type_attr: "T" } output_arg { name: "precision" type: DT_BOOL } attr { name: "k" type: "int" } attr { name: "T" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | inTopKV2 :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Tensor v'1 Float -- ^ __predictions__ -> Tensor v'2 t -- ^ __targets__ -> Tensor v'3 t -- ^ __k__ -> Tensor Build Bool -- ^ __precision__ inTopKV2 = inTopKV2' id inTopKV2' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Tensor v'1 Float -- ^ __predictions__ -> Tensor v'2 t -- ^ __targets__ -> Tensor v'3 t -- ^ __k__ -> Tensor Build Bool -- ^ __precision__ inTopKV2' op'options predictions targets k | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs predictions, buildInputs targets, buildInputs k] return (opDef "InTopKV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "predictions" type: DT_FLOAT } input_arg { name: "targets" type_attr: "T" } input_arg { name: "k" type_attr: "T" } output_arg { name: "precision" type: DT_BOOL } attr { name: "T" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | infeedDequeue :: forall dtype m' . (MonadBuild m', TensorType dtype) => Shape -- ^ __shape__ -> m' (Tensor Value dtype) -- ^ __output__ infeedDequeue = infeedDequeue' id infeedDequeue' :: forall dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Shape -- ^ __shape__ -> m' (Tensor Value dtype) -- ^ __output__ infeedDequeue' op'options shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "InfeedDequeue" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- output_arg { name: "output" type_attr: "dtype" } attr { name: "dtype" type: "type" } attr { name: "shape" type: "shape" } -} -- | infeedDequeueTuple :: forall dtypes m' . (MonadBuild m', TensorTypes dtypes) => m' (TensorList (Value) dtypes) -- ^ __outputs__ infeedDequeueTuple = infeedDequeueTuple' id infeedDequeueTuple' :: forall dtypes m' . (MonadBuild m', TensorTypes dtypes) => OpParams -> m' (TensorList (Value) dtypes) -- ^ __outputs__ infeedDequeueTuple' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "InfeedDequeueTuple" & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes) & op'options & opInputs .~ op'inputs) {- output_arg { name: "outputs" type_list_attr: "dtypes" } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "shapes" type: "list(shape)" } -} -- | infeedEnqueue :: forall v'1 dtype m' . 
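-- A minimal usage sketch for 'inTopK' above: per batch row it reports
-- whether the target class ranks among the k highest predictions
-- ('inTopKV2' instead takes k as a tensor input). With 'constant' assumed
-- from TensorFlow.Ops, this evaluates to [True, False]:
--
-- > hits :: Tensor Build Bool
-- > hits = inTopK 1
-- >   (constant (Shape [2, 3]) [0.1, 0.8, 0.1, 0.3, 0.3, 0.4 :: Float])
-- >   (constant (Shape [2]) [1, 0 :: Data.Int.Int64])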
(MonadBuild m', TensorType dtype) => Tensor v'1 dtype -- ^ __input__ -> m' (ControlNode) infeedEnqueue = infeedEnqueue' id infeedEnqueue' :: forall v'1 dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 dtype -- ^ __input__ -> m' (ControlNode) infeedEnqueue' op'options input | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "InfeedEnqueue" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "dtype" } attr { name: "dtype" type: "type" } attr { name: "shape" type: "shape" default_value { shape { } } } attr { name: "layout" type: "list(int)" default_value { list { } } } attr { name: "device_ordinal" type: "int" default_value { i: -1 } } -} -- | infeedEnqueuePrelinearizedBuffer :: forall v'1 m' . (MonadBuild m') => Tensor v'1 Variant -- ^ __input__ -> m' (ControlNode) infeedEnqueuePrelinearizedBuffer = infeedEnqueuePrelinearizedBuffer' id infeedEnqueuePrelinearizedBuffer' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 Variant -- ^ __input__ -> m' (ControlNode) infeedEnqueuePrelinearizedBuffer' op'options input | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "InfeedEnqueuePrelinearizedBuffer" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_VARIANT } attr { name: "device_ordinal" type: "int" default_value { i: -1 } } -} -- | infeedEnqueueTuple :: forall v'1 dtypes m' . (MonadBuild m', TensorTypes dtypes) => TensorList (v'1) dtypes -- ^ __inputs__ -> m' (ControlNode) infeedEnqueueTuple = infeedEnqueueTuple' id infeedEnqueueTuple' :: forall v'1 dtypes m' . (MonadBuild m', TensorTypes dtypes) => OpParams -> TensorList (v'1) dtypes -- ^ __inputs__ -> m' (ControlNode) infeedEnqueueTuple' op'options inputs | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs] buildOp [] (opDef "InfeedEnqueueTuple" & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes) & op'options & opInputs .~ op'inputs) {- input_arg { name: "inputs" type_list_attr: "dtypes" } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "shapes" type: "list(shape)" } attr { name: "layouts" type: "list(int)" default_value { list { } } } attr { name: "device_ordinal" type: "int" default_value { i: -1 } } -} -- | initializeTable :: forall v'2 v'3 tkey tval m' . (MonadBuild m', TensorType tkey, TensorType tval) => Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__ -> Tensor v'2 tkey -- ^ __keys__ -> Tensor v'3 tval -- ^ __values__ -> m' (ControlNode) initializeTable = initializeTable' id initializeTable' :: forall v'2 v'3 tkey tval m' . 
(MonadBuild m', TensorType tkey, TensorType tval) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__ -> Tensor v'2 tkey -- ^ __keys__ -> Tensor v'3 tval -- ^ __values__ -> m' (ControlNode) initializeTable' op'options table_handle keys values | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle, buildInputs keys, buildInputs values] buildOp [] (opDef "InitializeTable" & opAttr "Tkey" .~ tensorType (undefined :: tkey) & opAttr "Tval" .~ tensorType (undefined :: tval) & op'options & opInputs .~ op'inputs) {- input_arg { name: "table_handle" type: DT_STRING is_ref: true } input_arg { name: "keys" type_attr: "Tkey" } input_arg { name: "values" type_attr: "Tval" } attr { name: "Tkey" type: "type" } attr { name: "Tval" type: "type" } -} -- | initializeTableFromTextFile :: forall v'2 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __key_index__ -> Data.Int.Int64 -- ^ __value_index__ -> Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __filename__ -> m' (ControlNode) initializeTableFromTextFile = initializeTableFromTextFile' id initializeTableFromTextFile' :: forall v'2 m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __key_index__ -> Data.Int.Int64 -- ^ __value_index__ -> Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __filename__ -> m' (ControlNode) initializeTableFromTextFile' op'options key_index value_index table_handle filename | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle, buildInputs filename] buildOp [] (opDef "InitializeTableFromTextFile" & opAttr "key_index" .~ key_index & opAttr "value_index" .~ value_index & op'options & opInputs .~ op'inputs) {- input_arg { name: "table_handle" type: DT_STRING is_ref: true } input_arg { name: "filename" type: DT_STRING } attr { name: "key_index" type: "int" has_minimum: true minimum: -2 } attr { name: "value_index" type: "int" has_minimum: true minimum: -2 } attr { name: "vocab_size" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "delimiter" type: "string" default_value { s: "\t" } } -} -- | initializeTableFromTextFileV2 :: forall v'1 v'2 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __key_index__ -> Data.Int.Int64 -- ^ __value_index__ -> Tensor v'1 ResourceHandle -- ^ __table_handle__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __filename__ -> m' (ControlNode) initializeTableFromTextFileV2 = initializeTableFromTextFileV2' id initializeTableFromTextFileV2' :: forall v'1 v'2 m' . 
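-- A table created with 'hashTable' (or 'hashTableV2') starts empty and must
-- be populated, e.g. with 'initializeTable' above, or from a vocabulary file
-- with 'initializeTableFromTextFile', whose key_index/value_index arguments
-- pick the source per entry (a column index, -1 for the line number, -2 for
-- the whole line). A sketch assuming 'constant' from TensorFlow.Ops, 'run_'
-- from TensorFlow.Session, and the DataType constructors from the generated
-- protobuf types:
--
-- > setupTable :: Session ()
-- > setupTable = do
-- >   table <- hashTable DT_STRING DT_INT64
-- >   initOp <- initializeTable table
-- >     (constant (Shape [2]) ["yes", "no" :: Data.ByteString.ByteString])
-- >     (constant (Shape [2]) [1, 0 :: Data.Int.Int64])
-- >   run_ initOp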
(MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __key_index__ -> Data.Int.Int64 -- ^ __value_index__ -> Tensor v'1 ResourceHandle -- ^ __table_handle__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __filename__ -> m' (ControlNode) initializeTableFromTextFileV2' op'options key_index value_index table_handle filename | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle, buildInputs filename] buildOp [] (opDef "InitializeTableFromTextFileV2" & opAttr "key_index" .~ key_index & opAttr "value_index" .~ value_index & op'options & opInputs .~ op'inputs) {- input_arg { name: "table_handle" type: DT_RESOURCE } input_arg { name: "filename" type: DT_STRING } attr { name: "key_index" type: "int" has_minimum: true minimum: -2 } attr { name: "value_index" type: "int" has_minimum: true minimum: -2 } attr { name: "vocab_size" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "delimiter" type: "string" default_value { s: "\t" } } -} -- | initializeTableV2 :: forall v'1 v'2 v'3 tkey tval m' . (MonadBuild m', TensorType tkey, TensorType tval) => Tensor v'1 ResourceHandle -- ^ __table_handle__ -> Tensor v'2 tkey -- ^ __keys__ -> Tensor v'3 tval -- ^ __values__ -> m' (ControlNode) initializeTableV2 = initializeTableV2' id initializeTableV2' :: forall v'1 v'2 v'3 tkey tval m' . (MonadBuild m', TensorType tkey, TensorType tval) => OpParams -> Tensor v'1 ResourceHandle -- ^ __table_handle__ -> Tensor v'2 tkey -- ^ __keys__ -> Tensor v'3 tval -- ^ __values__ -> m' (ControlNode) initializeTableV2' op'options table_handle keys values | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle, buildInputs keys, buildInputs values] buildOp [] (opDef "InitializeTableV2" & opAttr "Tkey" .~ tensorType (undefined :: tkey) & opAttr "Tval" .~ tensorType (undefined :: tval) & op'options & opInputs .~ op'inputs) {- input_arg { name: "table_handle" type: DT_RESOURCE } input_arg { name: "keys" type_attr: "Tkey" } input_arg { name: "values" type_attr: "Tval" } attr { name: "Tkey" type: "type" } attr { name: "Tval" type: "type" } -} -- | inplaceAdd :: forall v'1 v'2 v'3 t . (TensorType t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 Data.Int.Int32 -- ^ __i__ -> Tensor v'3 t -- ^ __v__ -> Tensor Build t -- ^ __y__ inplaceAdd = inplaceAdd' id inplaceAdd' :: forall v'1 v'2 v'3 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 Data.Int.Int32 -- ^ __i__ -> Tensor v'3 t -- ^ __v__ -> Tensor Build t -- ^ __y__ inplaceAdd' op'options x i v | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs i, buildInputs v] return (opDef "InplaceAdd" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "i" type: DT_INT32 } input_arg { name: "v" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" } -} -- | inplaceSub :: forall v'1 v'2 v'3 t . (TensorType t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 Data.Int.Int32 -- ^ __i__ -> Tensor v'3 t -- ^ __v__ -> Tensor Build t -- ^ __y__ inplaceSub = inplaceSub' id inplaceSub' :: forall v'1 v'2 v'3 t . 
(TensorType t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 Data.Int.Int32 -- ^ __i__ -> Tensor v'3 t -- ^ __v__ -> Tensor Build t -- ^ __y__ inplaceSub' op'options x i v | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs i, buildInputs v] return (opDef "InplaceSub" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "i" type: DT_INT32 } input_arg { name: "v" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" } -} -- | inplaceUpdate :: forall v'1 v'2 v'3 t . (TensorType t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 Data.Int.Int32 -- ^ __i__ -> Tensor v'3 t -- ^ __v__ -> Tensor Build t -- ^ __y__ inplaceUpdate = inplaceUpdate' id inplaceUpdate' :: forall v'1 v'2 v'3 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 Data.Int.Int32 -- ^ __i__ -> Tensor v'3 t -- ^ __v__ -> Tensor Build t -- ^ __y__ inplaceUpdate' op'options x i v | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs i, buildInputs v] return (opDef "InplaceUpdate" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "i" type: DT_INT32 } input_arg { name: "v" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" } -} -- | inv :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ inv = inv' id inv' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ inv' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Inv" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | invGrad :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __y__ -> Tensor v'2 t -- ^ __dy__ -> Tensor Build t -- ^ __z__ invGrad = invGrad' id invGrad' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __y__ -> Tensor v'2 t -- ^ __dy__ -> Tensor Build t -- ^ __z__ invGrad' op'options y dy | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs y, buildInputs dy] return (opDef "InvGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "y" type_attr: "T" } input_arg { name: "dy" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | invert :: forall v'1 t . 
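-- The Inplace* ops take a rank-1 Int32 index tensor __i__ selecting rows of
-- __x__ and a block __v__ of matching shape: InplaceUpdate computes
-- x[i, :] = v, while InplaceAdd and InplaceSub accumulate into those rows.
-- A sketch with a hypothetical [2, 3] Float tensor 'x', assuming 'constant'
-- from TensorFlow.Ops:
--
-- > y = inplaceUpdate x (constant (Shape [1]) [0 :: Data.Int.Int32])
-- >                     (constant (Shape [1, 3]) [7, 8, 9 :: Float])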
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ invert = invert' id invert' :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ invert' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Invert" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_UINT16 type: DT_UINT32 type: DT_UINT64 } } } -} -- | invertPermutation :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ invertPermutation = invertPermutation' id invertPermutation' :: forall v'1 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ invertPermutation' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "InvertPermutation" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | isBoostedTreesEnsembleInitialized :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __tree_ensemble_handle__ -> m' (Tensor Value Bool) -- ^ __is_initialized__ isBoostedTreesEnsembleInitialized = isBoostedTreesEnsembleInitialized' id isBoostedTreesEnsembleInitialized' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __tree_ensemble_handle__ -> m' (Tensor Value Bool) -- ^ __is_initialized__ isBoostedTreesEnsembleInitialized' op'options tree_ensemble_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tree_ensemble_handle] buildOp [] (opDef "IsBoostedTreesEnsembleInitialized" & op'options & opInputs .~ op'inputs) {- input_arg { name: "tree_ensemble_handle" type: DT_RESOURCE } output_arg { name: "is_initialized" type: DT_BOOL } -} -- | isBoostedTreesQuantileStreamResourceInitialized :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __quantile_stream_resource_handle__ -> m' (Tensor Value Bool) -- ^ __is_initialized__ isBoostedTreesQuantileStreamResourceInitialized = isBoostedTreesQuantileStreamResourceInitialized' id isBoostedTreesQuantileStreamResourceInitialized' :: forall v'1 m' . 
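-- Invert is element-wise bitwise NOT on the integral types listed.
-- InvertPermutation computes y with y[x[i]] = i, e.g. the permutation
-- [3, 4, 0, 2, 1] inverts to [2, 4, 3, 0, 1].  A sketch, assuming
-- 'constant' from TensorFlow.Ops:
--
-- > inverse = invertPermutation
-- >             (constant (Shape [5]) [3, 4, 0, 2, 1 :: Data.Int.Int32])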
(MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __quantile_stream_resource_handle__ -> m' (Tensor Value Bool) -- ^ __is_initialized__ isBoostedTreesQuantileStreamResourceInitialized' op'options quantile_stream_resource_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs quantile_stream_resource_handle] buildOp [] (opDef "IsBoostedTreesQuantileStreamResourceInitialized" & op'options & opInputs .~ op'inputs) {- input_arg { name: "quantile_stream_resource_handle" type: DT_RESOURCE } output_arg { name: "is_initialized" type: DT_BOOL } -} -- | isFinite :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build Bool -- ^ __y__ isFinite = isFinite' id isFinite' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build Bool -- ^ __y__ isFinite' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "IsFinite" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | isInf :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build Bool -- ^ __y__ isInf = isInf' id isInf' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build Bool -- ^ __y__ isInf' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "IsInf" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | isNan :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build Bool -- ^ __y__ isNan = isNan' id isNan' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build Bool -- ^ __y__ isNan' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "IsNan" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | isVariableInitialized :: forall dtype m' . (MonadBuild m', TensorType dtype) => Tensor Ref dtype -- ^ __ref__ -> m' (Tensor Value Bool) -- ^ __is_initialized__ isVariableInitialized = isVariableInitialized' id isVariableInitialized' :: forall dtype m' . 
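-- IsFinite, IsInf and IsNan are element-wise predicates producing a Bool
-- tensor of the same shape.  A sketch, assuming 'constant' from
-- TensorFlow.Ops (0/0 and 1/0 build NaN and Infinity on the Haskell side):
--
-- > nanMask = isNan (constant (Shape [3]) [1, 0/0, 1/0 :: Float])
-- >   -- expected result: [False, True, False]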
(MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref dtype -- ^ __ref__ -> m' (Tensor Value Bool) -- ^ __is_initialized__ isVariableInitialized' op'options ref | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref] buildOp [] (opDef "IsVariableInitialized" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" type_attr: "dtype" is_ref: true } output_arg { name: "is_initialized" type: DT_BOOL } attr { name: "dtype" type: "type" } -} -- | iterator :: forall m' . (MonadBuild m') => [DataType] -- ^ __output_types__ -> m' (Tensor Value ResourceHandle) -- ^ __handle__ iterator = iterator' id iterator' :: forall m' . (MonadBuild m') => OpParams -> [DataType] -- ^ __output_types__ -> m' (Tensor Value ResourceHandle) -- ^ __handle__ iterator' op'options output_types | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "Iterator" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" type: DT_RESOURCE } attr { name: "shared_name" type: "string" } attr { name: "container" type: "string" } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | iteratorFromStringHandle :: forall v'1 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __string_handle__ -> m' (Tensor Value ResourceHandle) -- ^ __resource_handle__ iteratorFromStringHandle = iteratorFromStringHandle' id iteratorFromStringHandle' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __string_handle__ -> m' (Tensor Value ResourceHandle) -- ^ __resource_handle__ iteratorFromStringHandle' op'options string_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs string_handle] buildOp [] (opDef "IteratorFromStringHandle" & op'options & opInputs .~ op'inputs) {- input_arg { name: "string_handle" type: DT_STRING } output_arg { name: "resource_handle" type: DT_RESOURCE } attr { name: "output_types" type: "list(type)" default_value { list { } } has_minimum: true } attr { name: "output_shapes" type: "list(shape)" default_value { list { } } has_minimum: true } -} -- | iteratorFromStringHandleV2 :: forall v'1 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __string_handle__ -> m' (Tensor Value ResourceHandle) -- ^ __resource_handle__ iteratorFromStringHandleV2 = iteratorFromStringHandleV2' id iteratorFromStringHandleV2' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __string_handle__ -> m' (Tensor Value ResourceHandle) -- ^ __resource_handle__ iteratorFromStringHandleV2' op'options string_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs string_handle] buildOp [] (opDef "IteratorFromStringHandleV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "string_handle" type: DT_STRING } output_arg { name: "resource_handle" type: DT_RESOURCE } attr { name: "output_types" type: "list(type)" default_value { list { } } has_minimum: true } attr { name: "output_shapes" type: "list(shape)" default_value { list { } } has_minimum: true } -} -- | iteratorGetNext :: forall v'1 output_types m' . 
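-- Per the OpDef above, Iterator also requires the "shared_name", "container"
-- and "output_shapes" attrs beyond the __output_types__ argument, so they
-- must be supplied through the primed variant's OpParams.  A hypothetical
-- sketch (string attrs are ByteStrings here; the list(shape) attr
-- "output_shapes" is omitted for brevity):
--
-- > h <- iterator' ((opAttr "shared_name" .~ ("iter0" :: ByteString))
-- >                   . (opAttr "container" .~ ("" :: ByteString)))
-- >                [DT_FLOAT]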
(MonadBuild m', TensorTypes output_types) => Tensor v'1 ResourceHandle -- ^ __iterator__ -> m' (TensorList (Value) output_types) -- ^ __components__ iteratorGetNext = iteratorGetNext' id iteratorGetNext' :: forall v'1 output_types m' . (MonadBuild m', TensorTypes output_types) => OpParams -> Tensor v'1 ResourceHandle -- ^ __iterator__ -> m' (TensorList (Value) output_types) -- ^ __components__ iteratorGetNext' op'options iterator | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs iterator] buildOp [] (opDef "IteratorGetNext" & opAttr "output_types" .~ fromTensorTypes (Proxy :: Proxy output_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "iterator" type: DT_RESOURCE } output_arg { name: "components" type_list_attr: "output_types" } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | iteratorGetNextAsOptional :: forall v'1 m' . (MonadBuild m') => [DataType] -- ^ __output_types__ -> Tensor v'1 ResourceHandle -- ^ __iterator__ -> m' (Tensor Value Variant) -- ^ __optional__ iteratorGetNextAsOptional = iteratorGetNextAsOptional' id iteratorGetNextAsOptional' :: forall v'1 m' . (MonadBuild m') => OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 ResourceHandle -- ^ __iterator__ -> m' (Tensor Value Variant) -- ^ __optional__ iteratorGetNextAsOptional' op'options output_types iterator | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs iterator] buildOp [] (opDef "IteratorGetNextAsOptional" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "iterator" type: DT_RESOURCE } output_arg { name: "optional" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | iteratorGetNextSync :: forall v'1 output_types m' . (MonadBuild m', TensorTypes output_types) => Tensor v'1 ResourceHandle -- ^ __iterator__ -> m' (TensorList (Value) output_types) -- ^ __components__ iteratorGetNextSync = iteratorGetNextSync' id iteratorGetNextSync' :: forall v'1 output_types m' . (MonadBuild m', TensorTypes output_types) => OpParams -> Tensor v'1 ResourceHandle -- ^ __iterator__ -> m' (TensorList (Value) output_types) -- ^ __components__ iteratorGetNextSync' op'options iterator | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs iterator] buildOp [] (opDef "IteratorGetNextSync" & opAttr "output_types" .~ fromTensorTypes (Proxy :: Proxy output_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "iterator" type: DT_RESOURCE } output_arg { name: "components" type_list_attr: "output_types" } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | iteratorToStringHandle :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __resource_handle__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __string_handle__ iteratorToStringHandle = iteratorToStringHandle' id iteratorToStringHandle' :: forall v'1 m' . 
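-- IteratorGetNext's component types are carried in the type-level
-- output_types list, so call sites normally pin them with a signature.  A
-- hypothetical sketch fetching a (Float, Int64) pair of components:
--
-- > step :: MonadBuild m => Tensor v ResourceHandle
-- >      -> m (TensorList Value '[Float, Data.Int.Int64])
-- > step = iteratorGetNext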
(MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource_handle__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __string_handle__ iteratorToStringHandle' op'options resource_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource_handle] buildOp [] (opDef "IteratorToStringHandle" & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource_handle" type: DT_RESOURCE } output_arg { name: "string_handle" type: DT_STRING } -} -- | iteratorV2 :: forall m' . (MonadBuild m') => [DataType] -- ^ __output_types__ -> m' (Tensor Value ResourceHandle) -- ^ __handle__ iteratorV2 = iteratorV2' id iteratorV2' :: forall m' . (MonadBuild m') => OpParams -> [DataType] -- ^ __output_types__ -> m' (Tensor Value ResourceHandle) -- ^ __handle__ iteratorV2' op'options output_types | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "IteratorV2" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" type: DT_RESOURCE } attr { name: "shared_name" type: "string" } attr { name: "container" type: "string" } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | kMC2ChainInitialization :: Tensor v'1 Float -- ^ __distances__ -> Tensor v'2 Data.Int.Int64 -- ^ __seed__ -> Tensor Build Data.Int.Int64 -- ^ __index__ kMC2ChainInitialization = kMC2ChainInitialization' id kMC2ChainInitialization' :: OpParams -> Tensor v'1 Float -- ^ __distances__ -> Tensor v'2 Data.Int.Int64 -- ^ __seed__ -> Tensor Build Data.Int.Int64 -- ^ __index__ kMC2ChainInitialization' op'options distances seed | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs distances, buildInputs seed] return (opDef "KMC2ChainInitialization" & op'options & opInputs .~ op'inputs) {- input_arg { name: "distances" type: DT_FLOAT } input_arg { name: "seed" type: DT_INT64 } output_arg { name: "index" type: DT_INT64 } -} -- | kmeansPlusPlusInitialization :: Tensor v'1 Float -- ^ __points__ -> Tensor v'2 Data.Int.Int64 -- ^ __num_to_sample__ -> Tensor v'3 Data.Int.Int64 -- ^ __seed__ -> Tensor v'4 Data.Int.Int64 -- ^ __num_retries_per_sample__ -> Tensor Build Float -- ^ __samples__ kmeansPlusPlusInitialization = kmeansPlusPlusInitialization' id kmeansPlusPlusInitialization' :: OpParams -> Tensor v'1 Float -- ^ __points__ -> Tensor v'2 Data.Int.Int64 -- ^ __num_to_sample__ -> Tensor v'3 Data.Int.Int64 -- ^ __seed__ -> Tensor v'4 Data.Int.Int64 -- ^ __num_retries_per_sample__ -> Tensor Build Float -- ^ __samples__ kmeansPlusPlusInitialization' op'options points num_to_sample seed num_retries_per_sample | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs points, buildInputs num_to_sample, buildInputs seed, buildInputs num_retries_per_sample] return (opDef "KmeansPlusPlusInitialization" & op'options & opInputs .~ op'inputs) {- input_arg { name: "points" type: DT_FLOAT } input_arg { name: "num_to_sample" type: DT_INT64 } input_arg { name: "seed" type: DT_INT64 } input_arg { name: "num_retries_per_sample" type: DT_INT64 } output_arg { name: "samples" type: DT_FLOAT } -} -- | l2Loss :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __t__ -> Tensor Build t -- ^ __output__ l2Loss = l2Loss' id l2Loss' :: forall v'1 t . 
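-- KmeansPlusPlusInitialization samples __num_to_sample__ rows of the [n, d]
-- __points__ matrix by the k-means++ criterion, with
-- __num_retries_per_sample__ controlling extra candidate draws per sample.
-- A sketch with a hypothetical 'points' tensor, assuming 'scalar' from
-- TensorFlow.Ops:
--
-- > samples = kmeansPlusPlusInitialization points (scalar 4) (scalar 0)
-- >             (scalar (1 :: Data.Int.Int64))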
(OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __t__ -> Tensor Build t -- ^ __output__ l2Loss' op'options t | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs t] return (opDef "L2Loss" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "t" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | lMDBReader :: forall m' . (MonadBuild m') => m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__ lMDBReader = lMDBReader' id lMDBReader' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__ lMDBReader' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "LMDBReader" & op'options & opInputs .~ op'inputs) {- output_arg { name: "reader_handle" type: DT_STRING is_ref: true } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | lRN :: forall v'1 t . (OneOf '[Data.Word.Word16, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ lRN = lRN' id lRN' :: forall v'1 t . (OneOf '[Data.Word.Word16, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ lRN' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "LRN" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "depth_radius" type: "int" default_value { i: 5 } } attr { name: "bias" type: "float" default_value { f: 1.0 } } attr { name: "alpha" type: "float" default_value { f: 1.0 } } attr { name: "beta" type: "float" default_value { f: 0.5 } } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT } } } -} -- | lRNGrad :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Float] t) => Tensor v'1 t -- ^ __input_grads__ -> Tensor v'2 t -- ^ __input_image__ -> Tensor v'3 t -- ^ __output_image__ -> Tensor Build t -- ^ __output__ lRNGrad = lRNGrad' id lRNGrad' :: forall v'1 v'2 v'3 t . 
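-- L2Loss computes the scalar sum(t ^ 2) / 2.  A sketch, assuming 'constant'
-- from TensorFlow.Ops:
--
-- > loss = l2Loss (constant (Shape [3]) [3, 0, 4 :: Float])
-- >   -- expected scalar: (9 + 0 + 16) / 2 = 12.5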
(OneOf '[Data.Word.Word16, Float] t) => OpParams -> Tensor v'1 t -- ^ __input_grads__ -> Tensor v'2 t -- ^ __input_image__ -> Tensor v'3 t -- ^ __output_image__ -> Tensor Build t -- ^ __output__ lRNGrad' op'options input_grads input_image output_image | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_grads, buildInputs input_image, buildInputs output_image] return (opDef "LRNGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_grads" type_attr: "T" } input_arg { name: "input_image" type_attr: "T" } input_arg { name: "output_image" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "depth_radius" type: "int" default_value { i: 5 } } attr { name: "bias" type: "float" default_value { f: 1.0 } } attr { name: "alpha" type: "float" default_value { f: 1.0 } } attr { name: "beta" type: "float" default_value { f: 0.5 } } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT } } } -} -- | leakyRelu :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __features__ -> Tensor Build t -- ^ __activations__ leakyRelu = leakyRelu' id leakyRelu' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __features__ -> Tensor Build t -- ^ __activations__ leakyRelu' op'options features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features] return (opDef "LeakyRelu" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "features" type_attr: "T" } output_arg { name: "activations" type_attr: "T" } attr { name: "alpha" type: "float" default_value { f: 0.2 } } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | leakyReluGrad :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __gradients__ -> Tensor v'2 t -- ^ __features__ -> Tensor Build t -- ^ __backprops__ leakyReluGrad = leakyReluGrad' id leakyReluGrad' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __gradients__ -> Tensor v'2 t -- ^ __features__ -> Tensor Build t -- ^ __backprops__ leakyReluGrad' op'options gradients features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients, buildInputs features] return (opDef "LeakyReluGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "gradients" type_attr: "T" } input_arg { name: "features" type_attr: "T" } output_arg { name: "backprops" type_attr: "T" } attr { name: "alpha" type: "float" default_value { f: 0.2 } } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | learnedUnigramCandidateSampler :: forall v'1 m' . 
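-- Optional attrs that are not surfaced as arguments (e.g. LRN's
-- "depth_radius"/"bias"/"alpha"/"beta", or LeakyRelu's "alpha", default 0.2)
-- are set through the primed variants, since OpParams is just an
-- OpDef -> OpDef transformer.  A sketch with a hypothetical 'features'
-- tensor:
--
-- > acts = leakyRelu' (opAttr "alpha" .~ (0.1 :: Float)) features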
(MonadBuild m') => Data.Int.Int64 -- ^ __num_sampled__ -> Data.Int.Int64 -- ^ __num_true__ -> Data.Int.Int64 -- ^ __range_max__ -> Bool -- ^ __unique__ -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__ -> m' ((Tensor Value Data.Int.Int64, Tensor Value Float, Tensor Value Float)) -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__) -- -- * __sampled_candidates__ -- -- * __true_expected_count__ -- -- * __sampled_expected_count__ learnedUnigramCandidateSampler = learnedUnigramCandidateSampler' id learnedUnigramCandidateSampler' :: forall v'1 m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_sampled__ -> Data.Int.Int64 -- ^ __num_true__ -> Data.Int.Int64 -- ^ __range_max__ -> Bool -- ^ __unique__ -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__ -> m' ((Tensor Value Data.Int.Int64, Tensor Value Float, Tensor Value Float)) -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__) -- -- * __sampled_candidates__ -- -- * __true_expected_count__ -- -- * __sampled_expected_count__ learnedUnigramCandidateSampler' op'options num_sampled num_true range_max unique true_classes | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs true_classes] buildOp [] (opDef "LearnedUnigramCandidateSampler" & opAttr "num_sampled" .~ num_sampled & opAttr "num_true" .~ num_true & opAttr "range_max" .~ range_max & opAttr "unique" .~ unique & op'options & opInputs .~ op'inputs) {- input_arg { name: "true_classes" type: DT_INT64 } output_arg { name: "sampled_candidates" type: DT_INT64 } output_arg { name: "true_expected_count" type: DT_FLOAT } output_arg { name: "sampled_expected_count" type: DT_FLOAT } attr { name: "num_true" type: "int" has_minimum: true minimum: 1 } attr { name: "num_sampled" type: "int" has_minimum: true minimum: 1 } attr { name: "unique" type: "bool" } attr { name: "range_max" type: "int" has_minimum: true minimum: 1 } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } -} -- | leftShift :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ leftShift = leftShift' id leftShift' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ leftShift' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "LeftShift" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_UINT16 type: DT_UINT32 type: DT_UINT64 } } } -} -- | less :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ less = less' id less' :: forall v'1 v'2 t . 
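-- LeftShift is an element-wise bitwise shift; per the TensorFlow docs the
-- result is undefined when the shift amount is negative or at least the bit
-- width of the type.  A sketch, assuming 'constant' from TensorFlow.Ops:
--
-- > shifted = leftShift (constant (Shape [3]) [1, 2, 3 :: Data.Int.Int32])
-- >                     (constant (Shape [3]) [1, 1, 1 :: Data.Int.Int32])
-- >   -- expected result: [2, 4, 6]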
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ less' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "Less" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | lessEqual :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ lessEqual = lessEqual' id lessEqual' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ lessEqual' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "LessEqual" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | lgamma :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ lgamma = lgamma' id lgamma' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ lgamma' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Lgamma" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | linSpace :: forall v'1 v'2 v'3 t tidx . (OneOf '[Data.Word.Word16, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __start__ -> Tensor v'2 t -- ^ __stop__ -> Tensor v'3 tidx -- ^ __num__ -> Tensor Build t -- ^ __output__ linSpace = linSpace' id linSpace' :: forall v'1 v'2 v'3 t tidx . 
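-- LinSpace generates __num__ evenly spaced values from __start__ to
-- __stop__ inclusive; e.g. start = 10, stop = 12, num = 3 yields
-- [10.0, 11.0, 12.0].  A sketch, assuming 'scalar' from TensorFlow.Ops:
--
-- > xs = linSpace (scalar (10 :: Float)) (scalar 12)
-- >               (scalar (3 :: Data.Int.Int32))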
(OneOf '[Data.Word.Word16, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __start__ -> Tensor v'2 t -- ^ __stop__ -> Tensor v'3 tidx -- ^ __num__ -> Tensor Build t -- ^ __output__ linSpace' op'options start stop num | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs start, buildInputs stop, buildInputs num] return (opDef "LinSpace" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "start" type_attr: "T" } input_arg { name: "stop" type_attr: "T" } input_arg { name: "num" type_attr: "Tidx" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | listDiff :: forall v'1 v'2 t out_idx . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_idx) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> (Tensor Build t, Tensor Build out_idx) -- ^ (__out__, __idx__) -- -- * __out__ -- -- * __idx__ listDiff = listDiff' id listDiff' :: forall v'1 v'2 t out_idx . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_idx) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> (Tensor Build t, Tensor Build out_idx) -- ^ (__out__, __idx__) -- -- * __out__ -- -- * __idx__ listDiff' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "ListDiff" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "out_idx" .~ tensorType (undefined :: out_idx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "out" type_attr: "T" } output_arg { name: "idx" type_attr: "out_idx" } attr { name: "T" type: "type" } attr { name: "out_idx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | loadAndRemapMatrix :: forall v'1 v'2 v'3 v'4 v'5 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_cols__ -> Data.Int.Int64 -- ^ __num_rows__ -> Tensor v'1 Data.ByteString.ByteString -- ^ __ckpt_path__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __old_tensor_name__ -> Tensor v'3 Data.Int.Int64 -- ^ __row_remapping__ -> Tensor v'4 Data.Int.Int64 -- ^ __col_remapping__ -> Tensor v'5 Float -- ^ __initializing_values__ -> m' (Tensor Value Float) -- ^ __output_matrix__ loadAndRemapMatrix = loadAndRemapMatrix' id loadAndRemapMatrix' :: forall v'1 v'2 v'3 v'4 v'5 m' . 
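-- ListDiff keeps the values of __x__ that do not occur in __y__, along with
-- their positions in __x__: for x = [1, 2, 3, 4, 5, 6] and y = [1, 3, 5] it
-- returns out = [2, 4, 6] and idx = [1, 3, 5].  A sketch, assuming
-- 'constant' from TensorFlow.Ops (the annotation pins out_idx to Int32):
--
-- > (out, idx) = listDiff
-- >     (constant (Shape [6]) [1, 2, 3, 4, 5, 6 :: Data.Int.Int32])
-- >     (constant (Shape [3]) [1, 3, 5 :: Data.Int.Int32])
-- >     :: (Tensor Build Data.Int.Int32, Tensor Build Data.Int.Int32)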
(MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_cols__ -> Data.Int.Int64 -- ^ __num_rows__ -> Tensor v'1 Data.ByteString.ByteString -- ^ __ckpt_path__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __old_tensor_name__ -> Tensor v'3 Data.Int.Int64 -- ^ __row_remapping__ -> Tensor v'4 Data.Int.Int64 -- ^ __col_remapping__ -> Tensor v'5 Float -- ^ __initializing_values__ -> m' (Tensor Value Float) -- ^ __output_matrix__ loadAndRemapMatrix' op'options num_cols num_rows ckpt_path old_tensor_name row_remapping col_remapping initializing_values | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ckpt_path, buildInputs old_tensor_name, buildInputs row_remapping, buildInputs col_remapping, buildInputs initializing_values] buildOp [] (opDef "LoadAndRemapMatrix" & opAttr "num_cols" .~ num_cols & opAttr "num_rows" .~ num_rows & op'options & opInputs .~ op'inputs) {- input_arg { name: "ckpt_path" type: DT_STRING } input_arg { name: "old_tensor_name" type: DT_STRING } input_arg { name: "row_remapping" type: DT_INT64 } input_arg { name: "col_remapping" type: DT_INT64 } input_arg { name: "initializing_values" type: DT_FLOAT } output_arg { name: "output_matrix" type: DT_FLOAT } attr { name: "num_rows" type: "int" has_minimum: true } attr { name: "num_cols" type: "int" has_minimum: true minimum: 1 } attr { name: "max_rows_in_memory" type: "int" default_value { i: -1 } } -} -- | loadTPUEmbeddingADAMParameters :: forall v'1 v'2 v'3 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __momenta__ -> Tensor v'3 Float -- ^ __velocities__ -> m' (ControlNode) loadTPUEmbeddingADAMParameters = loadTPUEmbeddingADAMParameters' id loadTPUEmbeddingADAMParameters' :: forall v'1 v'2 v'3 m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __momenta__ -> Tensor v'3 Float -- ^ __velocities__ -> m' (ControlNode) loadTPUEmbeddingADAMParameters' op'options num_shards shard_id parameters momenta velocities | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs parameters, buildInputs momenta, buildInputs velocities] buildOp [] (opDef "LoadTPUEmbeddingADAMParameters" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- input_arg { name: "parameters" type: DT_FLOAT } input_arg { name: "momenta" type: DT_FLOAT } input_arg { name: "velocities" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | loadTPUEmbeddingADAMParametersGradAccumDebug :: forall v'1 v'2 v'3 v'4 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __momenta__ -> Tensor v'3 Float -- ^ __velocities__ -> Tensor v'4 Float -- ^ __gradient_accumulators__ -> m' (ControlNode) loadTPUEmbeddingADAMParametersGradAccumDebug = loadTPUEmbeddingADAMParametersGradAccumDebug' id loadTPUEmbeddingADAMParametersGradAccumDebug' :: forall v'1 v'2 v'3 v'4 m' . 
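-- LoadAndRemapMatrix reorders a [num_rows, num_cols] checkpoint tensor under
-- new row/column ids; remapping entries of -1 mark cells with no counterpart
-- in the old tensor, which are filled from __initializing_values__ in
-- row-major order.  The "max_rows_in_memory" attr (default -1, i.e. load the
-- full tensor) bounds peak memory via the primed variant.  A hypothetical
-- sketch (all argument names are placeholders):
--
-- > m <- loadAndRemapMatrix'
-- >        (opAttr "max_rows_in_memory" .~ (10000 :: Data.Int.Int64))
-- >        numCols numRows ckptPath oldName rowRemap colRemap initVals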
(MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __momenta__ -> Tensor v'3 Float -- ^ __velocities__ -> Tensor v'4 Float -- ^ __gradient_accumulators__ -> m' (ControlNode) loadTPUEmbeddingADAMParametersGradAccumDebug' op'options num_shards shard_id parameters momenta velocities gradient_accumulators | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs parameters, buildInputs momenta, buildInputs velocities, buildInputs gradient_accumulators] buildOp [] (opDef "LoadTPUEmbeddingADAMParametersGradAccumDebug" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- input_arg { name: "parameters" type: DT_FLOAT } input_arg { name: "momenta" type: DT_FLOAT } input_arg { name: "velocities" type: DT_FLOAT } input_arg { name: "gradient_accumulators" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | loadTPUEmbeddingAdadeltaParameters :: forall v'1 v'2 v'3 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __accumulators__ -> Tensor v'3 Float -- ^ __updates__ -> m' (ControlNode) loadTPUEmbeddingAdadeltaParameters = loadTPUEmbeddingAdadeltaParameters' id loadTPUEmbeddingAdadeltaParameters' :: forall v'1 v'2 v'3 m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __accumulators__ -> Tensor v'3 Float -- ^ __updates__ -> m' (ControlNode) loadTPUEmbeddingAdadeltaParameters' op'options num_shards shard_id parameters accumulators updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs parameters, buildInputs accumulators, buildInputs updates] buildOp [] (opDef "LoadTPUEmbeddingAdadeltaParameters" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- input_arg { name: "parameters" type: DT_FLOAT } input_arg { name: "accumulators" type: DT_FLOAT } input_arg { name: "updates" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | loadTPUEmbeddingAdadeltaParametersGradAccumDebug :: forall v'1 v'2 v'3 v'4 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __accumulators__ -> Tensor v'3 Float -- ^ __updates__ -> Tensor v'4 Float -- ^ __gradient_accumulators__ -> m' (ControlNode) loadTPUEmbeddingAdadeltaParametersGradAccumDebug = loadTPUEmbeddingAdadeltaParametersGradAccumDebug' id loadTPUEmbeddingAdadeltaParametersGradAccumDebug' :: forall v'1 v'2 v'3 v'4 m' . 
(MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __accumulators__ -> Tensor v'3 Float -- ^ __updates__ -> Tensor v'4 Float -- ^ __gradient_accumulators__ -> m' (ControlNode) loadTPUEmbeddingAdadeltaParametersGradAccumDebug' op'options num_shards shard_id parameters accumulators updates gradient_accumulators | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs parameters, buildInputs accumulators, buildInputs updates, buildInputs gradient_accumulators] buildOp [] (opDef "LoadTPUEmbeddingAdadeltaParametersGradAccumDebug" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- input_arg { name: "parameters" type: DT_FLOAT } input_arg { name: "accumulators" type: DT_FLOAT } input_arg { name: "updates" type: DT_FLOAT } input_arg { name: "gradient_accumulators" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | loadTPUEmbeddingAdagradParameters :: forall v'1 v'2 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __accumulators__ -> m' (ControlNode) loadTPUEmbeddingAdagradParameters = loadTPUEmbeddingAdagradParameters' id loadTPUEmbeddingAdagradParameters' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __accumulators__ -> m' (ControlNode) loadTPUEmbeddingAdagradParameters' op'options num_shards shard_id parameters accumulators | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs parameters, buildInputs accumulators] buildOp [] (opDef "LoadTPUEmbeddingAdagradParameters" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- input_arg { name: "parameters" type: DT_FLOAT } input_arg { name: "accumulators" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | loadTPUEmbeddingAdagradParametersGradAccumDebug :: forall v'1 v'2 v'3 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __accumulators__ -> Tensor v'3 Float -- ^ __gradient_accumulators__ -> m' (ControlNode) loadTPUEmbeddingAdagradParametersGradAccumDebug = loadTPUEmbeddingAdagradParametersGradAccumDebug' id loadTPUEmbeddingAdagradParametersGradAccumDebug' :: forall v'1 v'2 v'3 m' . 
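-- The LoadTPUEmbedding*Parameters family (ADAM, Adadelta, Adagrad, FTRL,
-- MDLAdagradLight, Momentum, RMSProp, SGD, and the *GradAccumDebug variants)
-- shares one calling shape: the optimizer's slot tensors plus a
-- __num_shards__/__shard_id__ pair, with the target table selected by the
-- "table_id" or "table_name" attr (defaults -1 and "").  A hypothetical
-- sketch selecting a table by name:
--
-- > loadIt = loadTPUEmbeddingAdagradParameters'
-- >     (opAttr "table_name" .~ ("emb_table_0" :: ByteString))
-- >     numShards shardId parameters accumulators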
(MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __accumulators__ -> Tensor v'3 Float -- ^ __gradient_accumulators__ -> m' (ControlNode) loadTPUEmbeddingAdagradParametersGradAccumDebug' op'options num_shards shard_id parameters accumulators gradient_accumulators | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs parameters, buildInputs accumulators, buildInputs gradient_accumulators] buildOp [] (opDef "LoadTPUEmbeddingAdagradParametersGradAccumDebug" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- input_arg { name: "parameters" type: DT_FLOAT } input_arg { name: "accumulators" type: DT_FLOAT } input_arg { name: "gradient_accumulators" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | loadTPUEmbeddingCenteredRMSPropParameters :: forall v'1 v'2 v'3 v'4 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __ms__ -> Tensor v'3 Float -- ^ __mom__ -> Tensor v'4 Float -- ^ __mg__ -> m' (ControlNode) loadTPUEmbeddingCenteredRMSPropParameters = loadTPUEmbeddingCenteredRMSPropParameters' id loadTPUEmbeddingCenteredRMSPropParameters' :: forall v'1 v'2 v'3 v'4 m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __ms__ -> Tensor v'3 Float -- ^ __mom__ -> Tensor v'4 Float -- ^ __mg__ -> m' (ControlNode) loadTPUEmbeddingCenteredRMSPropParameters' op'options num_shards shard_id parameters ms mom mg | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs parameters, buildInputs ms, buildInputs mom, buildInputs mg] buildOp [] (opDef "LoadTPUEmbeddingCenteredRMSPropParameters" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- input_arg { name: "parameters" type: DT_FLOAT } input_arg { name: "ms" type: DT_FLOAT } input_arg { name: "mom" type: DT_FLOAT } input_arg { name: "mg" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | loadTPUEmbeddingFTRLParameters :: forall v'1 v'2 v'3 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __accumulators__ -> Tensor v'3 Float -- ^ __linears__ -> m' (ControlNode) loadTPUEmbeddingFTRLParameters = loadTPUEmbeddingFTRLParameters' id loadTPUEmbeddingFTRLParameters' :: forall v'1 v'2 v'3 m' . 
(MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __accumulators__ -> Tensor v'3 Float -- ^ __linears__ -> m' (ControlNode) loadTPUEmbeddingFTRLParameters' op'options num_shards shard_id parameters accumulators linears | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs parameters, buildInputs accumulators, buildInputs linears] buildOp [] (opDef "LoadTPUEmbeddingFTRLParameters" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- input_arg { name: "parameters" type: DT_FLOAT } input_arg { name: "accumulators" type: DT_FLOAT } input_arg { name: "linears" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | loadTPUEmbeddingFTRLParametersGradAccumDebug :: forall v'1 v'2 v'3 v'4 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __accumulators__ -> Tensor v'3 Float -- ^ __linears__ -> Tensor v'4 Float -- ^ __gradient_accumulators__ -> m' (ControlNode) loadTPUEmbeddingFTRLParametersGradAccumDebug = loadTPUEmbeddingFTRLParametersGradAccumDebug' id loadTPUEmbeddingFTRLParametersGradAccumDebug' :: forall v'1 v'2 v'3 v'4 m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __accumulators__ -> Tensor v'3 Float -- ^ __linears__ -> Tensor v'4 Float -- ^ __gradient_accumulators__ -> m' (ControlNode) loadTPUEmbeddingFTRLParametersGradAccumDebug' op'options num_shards shard_id parameters accumulators linears gradient_accumulators | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs parameters, buildInputs accumulators, buildInputs linears, buildInputs gradient_accumulators] buildOp [] (opDef "LoadTPUEmbeddingFTRLParametersGradAccumDebug" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- input_arg { name: "parameters" type: DT_FLOAT } input_arg { name: "accumulators" type: DT_FLOAT } input_arg { name: "linears" type: DT_FLOAT } input_arg { name: "gradient_accumulators" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | loadTPUEmbeddingMDLAdagradLightParameters :: forall v'1 v'2 v'3 v'4 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __accumulators__ -> Tensor v'3 Float -- ^ __weights__ -> Tensor v'4 Float -- ^ __benefits__ -> m' (ControlNode) loadTPUEmbeddingMDLAdagradLightParameters = loadTPUEmbeddingMDLAdagradLightParameters' id loadTPUEmbeddingMDLAdagradLightParameters' :: forall v'1 v'2 v'3 v'4 m' . 
(MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __accumulators__ -> Tensor v'3 Float -- ^ __weights__ -> Tensor v'4 Float -- ^ __benefits__ -> m' (ControlNode) loadTPUEmbeddingMDLAdagradLightParameters' op'options num_shards shard_id parameters accumulators weights benefits | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs parameters, buildInputs accumulators, buildInputs weights, buildInputs benefits] buildOp [] (opDef "LoadTPUEmbeddingMDLAdagradLightParameters" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- input_arg { name: "parameters" type: DT_FLOAT } input_arg { name: "accumulators" type: DT_FLOAT } input_arg { name: "weights" type: DT_FLOAT } input_arg { name: "benefits" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | loadTPUEmbeddingMomentumParameters :: forall v'1 v'2 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __momenta__ -> m' (ControlNode) loadTPUEmbeddingMomentumParameters = loadTPUEmbeddingMomentumParameters' id loadTPUEmbeddingMomentumParameters' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __momenta__ -> m' (ControlNode) loadTPUEmbeddingMomentumParameters' op'options num_shards shard_id parameters momenta | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs parameters, buildInputs momenta] buildOp [] (opDef "LoadTPUEmbeddingMomentumParameters" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- input_arg { name: "parameters" type: DT_FLOAT } input_arg { name: "momenta" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | loadTPUEmbeddingMomentumParametersGradAccumDebug :: forall v'1 v'2 v'3 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __momenta__ -> Tensor v'3 Float -- ^ __gradient_accumulators__ -> m' (ControlNode) loadTPUEmbeddingMomentumParametersGradAccumDebug = loadTPUEmbeddingMomentumParametersGradAccumDebug' id loadTPUEmbeddingMomentumParametersGradAccumDebug' :: forall v'1 v'2 v'3 m' . 
(MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __momenta__ -> Tensor v'3 Float -- ^ __gradient_accumulators__ -> m' (ControlNode) loadTPUEmbeddingMomentumParametersGradAccumDebug' op'options num_shards shard_id parameters momenta gradient_accumulators | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs parameters, buildInputs momenta, buildInputs gradient_accumulators] buildOp [] (opDef "LoadTPUEmbeddingMomentumParametersGradAccumDebug" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- input_arg { name: "parameters" type: DT_FLOAT } input_arg { name: "momenta" type: DT_FLOAT } input_arg { name: "gradient_accumulators" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | loadTPUEmbeddingProximalAdagradParameters :: forall v'1 v'2 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __accumulators__ -> m' (ControlNode) loadTPUEmbeddingProximalAdagradParameters = loadTPUEmbeddingProximalAdagradParameters' id loadTPUEmbeddingProximalAdagradParameters' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __accumulators__ -> m' (ControlNode) loadTPUEmbeddingProximalAdagradParameters' op'options num_shards shard_id parameters accumulators | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs parameters, buildInputs accumulators] buildOp [] (opDef "LoadTPUEmbeddingProximalAdagradParameters" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- input_arg { name: "parameters" type: DT_FLOAT } input_arg { name: "accumulators" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | loadTPUEmbeddingProximalAdagradParametersGradAccumDebug :: forall v'1 v'2 v'3 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __accumulators__ -> Tensor v'3 Float -- ^ __gradient_accumulators__ -> m' (ControlNode) loadTPUEmbeddingProximalAdagradParametersGradAccumDebug = loadTPUEmbeddingProximalAdagradParametersGradAccumDebug' id loadTPUEmbeddingProximalAdagradParametersGradAccumDebug' :: forall v'1 v'2 v'3 m' . 
(MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __accumulators__ -> Tensor v'3 Float -- ^ __gradient_accumulators__ -> m' (ControlNode) loadTPUEmbeddingProximalAdagradParametersGradAccumDebug' op'options num_shards shard_id parameters accumulators gradient_accumulators | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs parameters, buildInputs accumulators, buildInputs gradient_accumulators] buildOp [] (opDef "LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- input_arg { name: "parameters" type: DT_FLOAT } input_arg { name: "accumulators" type: DT_FLOAT } input_arg { name: "gradient_accumulators" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | loadTPUEmbeddingRMSPropParameters :: forall v'1 v'2 v'3 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __ms__ -> Tensor v'3 Float -- ^ __mom__ -> m' (ControlNode) loadTPUEmbeddingRMSPropParameters = loadTPUEmbeddingRMSPropParameters' id loadTPUEmbeddingRMSPropParameters' :: forall v'1 v'2 v'3 m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __ms__ -> Tensor v'3 Float -- ^ __mom__ -> m' (ControlNode) loadTPUEmbeddingRMSPropParameters' op'options num_shards shard_id parameters ms mom | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs parameters, buildInputs ms, buildInputs mom] buildOp [] (opDef "LoadTPUEmbeddingRMSPropParameters" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- input_arg { name: "parameters" type: DT_FLOAT } input_arg { name: "ms" type: DT_FLOAT } input_arg { name: "mom" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | loadTPUEmbeddingRMSPropParametersGradAccumDebug :: forall v'1 v'2 v'3 v'4 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __ms__ -> Tensor v'3 Float -- ^ __mom__ -> Tensor v'4 Float -- ^ __gradient_accumulators__ -> m' (ControlNode) loadTPUEmbeddingRMSPropParametersGradAccumDebug = loadTPUEmbeddingRMSPropParametersGradAccumDebug' id loadTPUEmbeddingRMSPropParametersGradAccumDebug' :: forall v'1 v'2 v'3 v'4 m' . 
(MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> Tensor v'2 Float -- ^ __ms__ -> Tensor v'3 Float -- ^ __mom__ -> Tensor v'4 Float -- ^ __gradient_accumulators__ -> m' (ControlNode) loadTPUEmbeddingRMSPropParametersGradAccumDebug' op'options num_shards shard_id parameters ms mom gradient_accumulators | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs parameters, buildInputs ms, buildInputs mom, buildInputs gradient_accumulators] buildOp [] (opDef "LoadTPUEmbeddingRMSPropParametersGradAccumDebug" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- input_arg { name: "parameters" type: DT_FLOAT } input_arg { name: "ms" type: DT_FLOAT } input_arg { name: "mom" type: DT_FLOAT } input_arg { name: "gradient_accumulators" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | loadTPUEmbeddingStochasticGradientDescentParameters :: forall v'1 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> m' (ControlNode) loadTPUEmbeddingStochasticGradientDescentParameters = loadTPUEmbeddingStochasticGradientDescentParameters' id loadTPUEmbeddingStochasticGradientDescentParameters' :: forall v'1 m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> Tensor v'1 Float -- ^ __parameters__ -> m' (ControlNode) loadTPUEmbeddingStochasticGradientDescentParameters' op'options num_shards shard_id parameters | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs parameters] buildOp [] (opDef "LoadTPUEmbeddingStochasticGradientDescentParameters" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- input_arg { name: "parameters" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | log :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ log = log' id log' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ log' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Log" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | log1p :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ log1p = log1p' id log1p' :: forall v'1 t . 
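{- Usage sketch (hand-written, not generated): log and log1p are pure
   element-wise ops, so they compose as ordinary Build expressions.  Note that
   these bindings shadow Prelude names such as `log`, so the module is
   normally imported qualified.  Assuming TensorFlow.Ops.constant for the
   literal:

     y :: Tensor Build Float
     y = log1p (TensorFlow.Ops.constant (Shape [3]) [1e-8, 0.5, 2])

   For x near zero, log1p x is more accurate than log (1 + x).
-}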
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ log1p' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Log1p" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | logMatrixDeterminant :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> (Tensor Build t, Tensor Build t) -- ^ (__sign__, __log_abs_determinant__) -- -- * __sign__ -- -- * __log_abs_determinant__ logMatrixDeterminant = logMatrixDeterminant' id logMatrixDeterminant' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> (Tensor Build t, Tensor Build t) -- ^ (__sign__, __log_abs_determinant__) -- -- * __sign__ -- -- * __log_abs_determinant__ logMatrixDeterminant' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "LogMatrixDeterminant" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "sign" type_attr: "T" } output_arg { name: "log_abs_determinant" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | logSoftmax :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __logits__ -> Tensor Build t -- ^ __logsoftmax__ logSoftmax = logSoftmax' id logSoftmax' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __logits__ -> Tensor Build t -- ^ __logsoftmax__ logSoftmax' op'options logits | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs logits] return (opDef "LogSoftmax" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "logits" type_attr: "T" } output_arg { name: "logsoftmax" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | logUniformCandidateSampler :: forall v'1 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_sampled__ -> Data.Int.Int64 -- ^ __num_true__ -> Data.Int.Int64 -- ^ __range_max__ -> Bool -- ^ __unique__ -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__ -> m' ((Tensor Value Data.Int.Int64, Tensor Value Float, Tensor Value Float)) -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__) -- -- * __sampled_candidates__ -- -- * __true_expected_count__ -- -- * __sampled_expected_count__ logUniformCandidateSampler = logUniformCandidateSampler' id logUniformCandidateSampler' :: forall v'1 m' . 
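{- Usage sketch (hand-written, not generated): the candidate sampler is
   stateful, so it lives in MonadBuild and returns rendered Value tensors.
   The optional "seed"/"seed2" attrs default to 0 (nondeterministic) and can
   be pinned through OpParams:

     sampled :: MonadBuild m
             => Tensor v Data.Int.Int64
             -> m (Tensor Value Data.Int.Int64, Tensor Value Float, Tensor Value Float)
     sampled trueClasses =
       logUniformCandidateSampler'
         (opAttr "seed" .~ (42 :: Data.Int.Int64))
         64     -- num_sampled
         1      -- num_true
         10000  -- range_max
         True   -- unique
         trueClasses
-}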
(MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_sampled__ -> Data.Int.Int64 -- ^ __num_true__ -> Data.Int.Int64 -- ^ __range_max__ -> Bool -- ^ __unique__ -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__ -> m' ((Tensor Value Data.Int.Int64, Tensor Value Float, Tensor Value Float)) -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__) -- -- * __sampled_candidates__ -- -- * __true_expected_count__ -- -- * __sampled_expected_count__ logUniformCandidateSampler' op'options num_sampled num_true range_max unique true_classes | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs true_classes] buildOp [] (opDef "LogUniformCandidateSampler" & opAttr "num_sampled" .~ num_sampled & opAttr "num_true" .~ num_true & opAttr "range_max" .~ range_max & opAttr "unique" .~ unique & op'options & opInputs .~ op'inputs) {- input_arg { name: "true_classes" type: DT_INT64 } output_arg { name: "sampled_candidates" type: DT_INT64 } output_arg { name: "true_expected_count" type: DT_FLOAT } output_arg { name: "sampled_expected_count" type: DT_FLOAT } attr { name: "num_true" type: "int" has_minimum: true minimum: 1 } attr { name: "num_sampled" type: "int" has_minimum: true minimum: 1 } attr { name: "unique" type: "bool" } attr { name: "range_max" type: "int" has_minimum: true minimum: 1 } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } -} -- | logicalAnd :: Tensor v'1 Bool -- ^ __x__ -> Tensor v'2 Bool -- ^ __y__ -> Tensor Build Bool -- ^ __z__ logicalAnd = logicalAnd' id logicalAnd' :: OpParams -> Tensor v'1 Bool -- ^ __x__ -> Tensor v'2 Bool -- ^ __y__ -> Tensor Build Bool -- ^ __z__ logicalAnd' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "LogicalAnd" & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type: DT_BOOL } input_arg { name: "y" type: DT_BOOL } output_arg { name: "z" type: DT_BOOL } -} -- | logicalNot :: Tensor v'1 Bool -- ^ __x__ -> Tensor Build Bool -- ^ __y__ logicalNot = logicalNot' id logicalNot' :: OpParams -> Tensor v'1 Bool -- ^ __x__ -> Tensor Build Bool -- ^ __y__ logicalNot' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "LogicalNot" & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type: DT_BOOL } output_arg { name: "y" type: DT_BOOL } -} -- | logicalOr :: Tensor v'1 Bool -- ^ __x__ -> Tensor v'2 Bool -- ^ __y__ -> Tensor Build Bool -- ^ __z__ logicalOr = logicalOr' id logicalOr' :: OpParams -> Tensor v'1 Bool -- ^ __x__ -> Tensor v'2 Bool -- ^ __y__ -> Tensor Build Bool -- ^ __z__ logicalOr' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "LogicalOr" & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type: DT_BOOL } input_arg { name: "y" type: DT_BOOL } output_arg { name: "z" type: DT_BOOL } -} -- | lookupTableExport :: forall tkeys tvalues m' . (MonadBuild m', TensorType tkeys, TensorType tvalues) => Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__ -> m' ((Tensor Value tkeys, Tensor Value tvalues)) -- ^ (__keys__, __values__) -- -- * __keys__ -- -- * __values__ lookupTableExport = lookupTableExport' id lookupTableExport' :: forall tkeys tvalues m' . 
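{- Usage sketch (hand-written, not generated): the Logical* ops are
   monomorphic on Bool and carry no type attrs, so they nest directly as pure
   Build expressions, e.g. an exclusive-or:

     xor :: Tensor v1 Bool -> Tensor v2 Bool -> Tensor Build Bool
     xor a b = logicalAnd (logicalOr a b) (logicalNot (logicalAnd a b))
-}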
(MonadBuild m', TensorType tkeys, TensorType tvalues) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__ -> m' ((Tensor Value tkeys, Tensor Value tvalues)) -- ^ (__keys__, __values__) -- -- * __keys__ -- -- * __values__ lookupTableExport' op'options table_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle] buildOp [] (opDef "LookupTableExport" & opAttr "Tkeys" .~ tensorType (undefined :: tkeys) & opAttr "Tvalues" .~ tensorType (undefined :: tvalues) & op'options & opInputs .~ op'inputs) {- input_arg { name: "table_handle" type: DT_STRING is_ref: true } output_arg { name: "keys" type_attr: "Tkeys" } output_arg { name: "values" type_attr: "Tvalues" } attr { name: "Tkeys" type: "type" } attr { name: "Tvalues" type: "type" } -} -- | lookupTableExportV2 :: forall v'1 tkeys tvalues m' . (MonadBuild m', TensorType tkeys, TensorType tvalues) => Tensor v'1 ResourceHandle -- ^ __table_handle__ -> m' ((Tensor Value tkeys, Tensor Value tvalues)) -- ^ (__keys__, __values__) -- -- * __keys__ -- -- * __values__ lookupTableExportV2 = lookupTableExportV2' id lookupTableExportV2' :: forall v'1 tkeys tvalues m' . (MonadBuild m', TensorType tkeys, TensorType tvalues) => OpParams -> Tensor v'1 ResourceHandle -- ^ __table_handle__ -> m' ((Tensor Value tkeys, Tensor Value tvalues)) -- ^ (__keys__, __values__) -- -- * __keys__ -- -- * __values__ lookupTableExportV2' op'options table_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle] buildOp [] (opDef "LookupTableExportV2" & opAttr "Tkeys" .~ tensorType (undefined :: tkeys) & opAttr "Tvalues" .~ tensorType (undefined :: tvalues) & op'options & opInputs .~ op'inputs) {- input_arg { name: "table_handle" type: DT_RESOURCE } output_arg { name: "keys" type_attr: "Tkeys" } output_arg { name: "values" type_attr: "Tvalues" } attr { name: "Tkeys" type: "type" } attr { name: "Tvalues" type: "type" } -} -- | lookupTableFind :: forall v'2 v'3 tin tout m' . (MonadBuild m', TensorType tin, TensorType tout) => Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__ -> Tensor v'2 tin -- ^ __keys__ -> Tensor v'3 tout -- ^ __default_value__ -> m' (Tensor Value tout) -- ^ __values__ lookupTableFind = lookupTableFind' id lookupTableFind' :: forall v'2 v'3 tin tout m' . (MonadBuild m', TensorType tin, TensorType tout) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__ -> Tensor v'2 tin -- ^ __keys__ -> Tensor v'3 tout -- ^ __default_value__ -> m' (Tensor Value tout) -- ^ __values__ lookupTableFind' op'options table_handle keys default_value | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle, buildInputs keys, buildInputs default_value] buildOp [] (opDef "LookupTableFind" & opAttr "Tin" .~ tensorType (undefined :: tin) & opAttr "Tout" .~ tensorType (undefined :: tout) & op'options & opInputs .~ op'inputs) {- input_arg { name: "table_handle" type: DT_STRING is_ref: true } input_arg { name: "keys" type_attr: "Tin" } input_arg { name: "default_value" type_attr: "Tout" } output_arg { name: "values" type_attr: "Tout" } attr { name: "Tin" type: "type" } attr { name: "Tout" type: "type" } -} -- | lookupTableFindV2 :: forall v'1 v'2 v'3 tin tout m' . 
(MonadBuild m', TensorType tin, TensorType tout) => Tensor v'1 ResourceHandle -- ^ __table_handle__ -> Tensor v'2 tin -- ^ __keys__ -> Tensor v'3 tout -- ^ __default_value__ -> m' (Tensor Value tout) -- ^ __values__ lookupTableFindV2 = lookupTableFindV2' id lookupTableFindV2' :: forall v'1 v'2 v'3 tin tout m' . (MonadBuild m', TensorType tin, TensorType tout) => OpParams -> Tensor v'1 ResourceHandle -- ^ __table_handle__ -> Tensor v'2 tin -- ^ __keys__ -> Tensor v'3 tout -- ^ __default_value__ -> m' (Tensor Value tout) -- ^ __values__ lookupTableFindV2' op'options table_handle keys default_value | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle, buildInputs keys, buildInputs default_value] buildOp [] (opDef "LookupTableFindV2" & opAttr "Tin" .~ tensorType (undefined :: tin) & opAttr "Tout" .~ tensorType (undefined :: tout) & op'options & opInputs .~ op'inputs) {- input_arg { name: "table_handle" type: DT_RESOURCE } input_arg { name: "keys" type_attr: "Tin" } input_arg { name: "default_value" type_attr: "Tout" } output_arg { name: "values" type_attr: "Tout" } attr { name: "Tin" type: "type" } attr { name: "Tout" type: "type" } -} -- | lookupTableImport :: forall v'2 v'3 tin tout m' . (MonadBuild m', TensorType tin, TensorType tout) => Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__ -> Tensor v'2 tin -- ^ __keys__ -> Tensor v'3 tout -- ^ __values__ -> m' (ControlNode) lookupTableImport = lookupTableImport' id lookupTableImport' :: forall v'2 v'3 tin tout m' . (MonadBuild m', TensorType tin, TensorType tout) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__ -> Tensor v'2 tin -- ^ __keys__ -> Tensor v'3 tout -- ^ __values__ -> m' (ControlNode) lookupTableImport' op'options table_handle keys values | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle, buildInputs keys, buildInputs values] buildOp [] (opDef "LookupTableImport" & opAttr "Tin" .~ tensorType (undefined :: tin) & opAttr "Tout" .~ tensorType (undefined :: tout) & op'options & opInputs .~ op'inputs) {- input_arg { name: "table_handle" type: DT_STRING is_ref: true } input_arg { name: "keys" type_attr: "Tin" } input_arg { name: "values" type_attr: "Tout" } attr { name: "Tin" type: "type" } attr { name: "Tout" type: "type" } -} -- | lookupTableImportV2 :: forall v'1 v'2 v'3 tin tout m' . (MonadBuild m', TensorType tin, TensorType tout) => Tensor v'1 ResourceHandle -- ^ __table_handle__ -> Tensor v'2 tin -- ^ __keys__ -> Tensor v'3 tout -- ^ __values__ -> m' (ControlNode) lookupTableImportV2 = lookupTableImportV2' id lookupTableImportV2' :: forall v'1 v'2 v'3 tin tout m' . 
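{- Usage sketch (hand-written, not generated): LookupTableFindV2 infers the
   "Tin"/"Tout" attrs from the key and default-value tensors, and returns the
   default for keys that are absent.  Assuming a resource-handle table built
   elsewhere with ByteString keys and Int64 values:

     findOrDefault :: MonadBuild m
                   => Tensor v1 ResourceHandle
                   -> Tensor v2 ByteString
                   -> Tensor v3 Data.Int.Int64
                   -> m (Tensor Value Data.Int.Int64)
     findOrDefault table keys dflt = lookupTableFindV2 table keys dflt
-}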
(MonadBuild m', TensorType tin, TensorType tout) => OpParams -> Tensor v'1 ResourceHandle -- ^ __table_handle__ -> Tensor v'2 tin -- ^ __keys__ -> Tensor v'3 tout -- ^ __values__ -> m' (ControlNode) lookupTableImportV2' op'options table_handle keys values | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle, buildInputs keys, buildInputs values] buildOp [] (opDef "LookupTableImportV2" & opAttr "Tin" .~ tensorType (undefined :: tin) & opAttr "Tout" .~ tensorType (undefined :: tout) & op'options & opInputs .~ op'inputs) {- input_arg { name: "table_handle" type: DT_RESOURCE } input_arg { name: "keys" type_attr: "Tin" } input_arg { name: "values" type_attr: "Tout" } attr { name: "Tin" type: "type" } attr { name: "Tout" type: "type" } -} -- | lookupTableInsert :: forall v'2 v'3 tin tout m' . (MonadBuild m', TensorType tin, TensorType tout) => Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__ -> Tensor v'2 tin -- ^ __keys__ -> Tensor v'3 tout -- ^ __values__ -> m' (ControlNode) lookupTableInsert = lookupTableInsert' id lookupTableInsert' :: forall v'2 v'3 tin tout m' . (MonadBuild m', TensorType tin, TensorType tout) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__ -> Tensor v'2 tin -- ^ __keys__ -> Tensor v'3 tout -- ^ __values__ -> m' (ControlNode) lookupTableInsert' op'options table_handle keys values | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle, buildInputs keys, buildInputs values] buildOp [] (opDef "LookupTableInsert" & opAttr "Tin" .~ tensorType (undefined :: tin) & opAttr "Tout" .~ tensorType (undefined :: tout) & op'options & opInputs .~ op'inputs) {- input_arg { name: "table_handle" type: DT_STRING is_ref: true } input_arg { name: "keys" type_attr: "Tin" } input_arg { name: "values" type_attr: "Tout" } attr { name: "Tin" type: "type" } attr { name: "Tout" type: "type" } -} -- | lookupTableInsertV2 :: forall v'1 v'2 v'3 tin tout m' . (MonadBuild m', TensorType tin, TensorType tout) => Tensor v'1 ResourceHandle -- ^ __table_handle__ -> Tensor v'2 tin -- ^ __keys__ -> Tensor v'3 tout -- ^ __values__ -> m' (ControlNode) lookupTableInsertV2 = lookupTableInsertV2' id lookupTableInsertV2' :: forall v'1 v'2 v'3 tin tout m' . (MonadBuild m', TensorType tin, TensorType tout) => OpParams -> Tensor v'1 ResourceHandle -- ^ __table_handle__ -> Tensor v'2 tin -- ^ __keys__ -> Tensor v'3 tout -- ^ __values__ -> m' (ControlNode) lookupTableInsertV2' op'options table_handle keys values | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle, buildInputs keys, buildInputs values] buildOp [] (opDef "LookupTableInsertV2" & opAttr "Tin" .~ tensorType (undefined :: tin) & opAttr "Tout" .~ tensorType (undefined :: tout) & op'options & opInputs .~ op'inputs) {- input_arg { name: "table_handle" type: DT_RESOURCE } input_arg { name: "keys" type_attr: "Tin" } input_arg { name: "values" type_attr: "Tout" } attr { name: "Tin" type: "type" } attr { name: "Tout" type: "type" } -} -- | lookupTableRemoveV2 :: forall v'1 v'2 tin m' . (MonadBuild m', TensorType tin) => Tensor v'1 ResourceHandle -- ^ __table_handle__ -> Tensor v'2 tin -- ^ __keys__ -> m' (ControlNode) lookupTableRemoveV2 = lookupTableRemoveV2' id lookupTableRemoveV2' :: forall v'1 v'2 tin m' . 
(MonadBuild m', TensorType tin) => OpParams -> Tensor v'1 ResourceHandle -- ^ __table_handle__ -> Tensor v'2 tin -- ^ __keys__ -> m' (ControlNode) lookupTableRemoveV2' op'options table_handle keys | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle, buildInputs keys] buildOp [] (opDef "LookupTableRemoveV2" & opAttr "Tin" .~ tensorType (undefined :: tin) & op'options & opInputs .~ op'inputs) {- input_arg { name: "table_handle" type: DT_RESOURCE } input_arg { name: "keys" type_attr: "Tin" } attr { name: "Tin" type: "type" } -} -- | lookupTableSize :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__ -> m' (Tensor Value Data.Int.Int64) -- ^ __size__ lookupTableSize = lookupTableSize' id lookupTableSize' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __table_handle__ -> m' (Tensor Value Data.Int.Int64) -- ^ __size__ lookupTableSize' op'options table_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle] buildOp [] (opDef "LookupTableSize" & op'options & opInputs .~ op'inputs) {- input_arg { name: "table_handle" type: DT_STRING is_ref: true } output_arg { name: "size" type: DT_INT64 } -} -- | lookupTableSizeV2 :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __table_handle__ -> m' (Tensor Value Data.Int.Int64) -- ^ __size__ lookupTableSizeV2 = lookupTableSizeV2' id lookupTableSizeV2' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __table_handle__ -> m' (Tensor Value Data.Int.Int64) -- ^ __size__ lookupTableSizeV2' op'options table_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs table_handle] buildOp [] (opDef "LookupTableSizeV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "table_handle" type: DT_RESOURCE } output_arg { name: "size" type: DT_INT64 } -} -- | loopCond :: Tensor v'1 Bool -- ^ __input__ -> Tensor Build Bool -- ^ __output__ loopCond = loopCond' id loopCond' :: OpParams -> Tensor v'1 Bool -- ^ __input__ -> Tensor Build Bool -- ^ __output__ loopCond' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "LoopCond" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_BOOL } output_arg { name: "output" type: DT_BOOL } -} -- | lowerBound :: forall v'1 v'2 t out_type . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_type) => Tensor v'1 t -- ^ __sorted_inputs__ -> Tensor v'2 t -- ^ __values__ -> Tensor Build out_type -- ^ __output__ lowerBound = lowerBound' id lowerBound' :: forall v'1 v'2 t out_type . 
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_type) => OpParams -> Tensor v'1 t -- ^ __sorted_inputs__ -> Tensor v'2 t -- ^ __values__ -> Tensor Build out_type -- ^ __output__ lowerBound' op'options sorted_inputs values | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sorted_inputs, buildInputs values] return (opDef "LowerBound" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "sorted_inputs" type_attr: "T" } input_arg { name: "values" type_attr: "T" } output_arg { name: "output" type_attr: "out_type" } attr { name: "T" type: "type" } attr { name: "out_type" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | lu :: forall v'1 t output_idx_type . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] output_idx_type) => Tensor v'1 t -- ^ __input__ -> (Tensor Build t, Tensor Build output_idx_type) -- ^ (__lu__, __p__) -- -- * __lu__ -- -- * __p__ lu = lu' id lu' :: forall v'1 t output_idx_type . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] output_idx_type) => OpParams -> Tensor v'1 t -- ^ __input__ -> (Tensor Build t, Tensor Build output_idx_type) -- ^ (__lu__, __p__) -- -- * __lu__ -- -- * __p__ lu' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Lu" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "output_idx_type" .~ tensorType (undefined :: output_idx_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "lu" type_attr: "T" } output_arg { name: "p" type_attr: "output_idx_type" } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_HALF type: DT_COMPLEX64 type: DT_COMPLEX128 } } } attr { name: "output_idx_type" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | makeIterator :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 Variant -- ^ __dataset__ -> Tensor v'2 ResourceHandle -- ^ __iterator__ -> m' (ControlNode) makeIterator = makeIterator' id makeIterator' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Tensor v'1 Variant -- ^ __dataset__ -> Tensor v'2 ResourceHandle -- ^ __iterator__ -> m' (ControlNode) makeIterator' op'options dataset iterator | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs dataset, buildInputs iterator] buildOp [] (opDef "MakeIterator" & op'options & opInputs .~ op'inputs) {- input_arg { name: "dataset" type: DT_VARIANT } input_arg { name: "iterator" type: DT_RESOURCE } -} -- | mapClear :: forall m' . (MonadBuild m') => [DataType] -- ^ __dtypes__ -> m' (ControlNode) mapClear = mapClear' id mapClear' :: forall m' . 
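{- Usage sketch (hand-written, not generated): LowerBound is a per-row
   left-side searchsorted; the index dtype is picked by annotating the
   result, which fixes the "out_type" attr:

     idx :: Tensor v1 Float -> Tensor v2 Float -> Tensor Build Data.Int.Int32
     idx sortedRows queries = lowerBound sortedRows queries
-}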
(MonadBuild m') => OpParams -> [DataType] -- ^ __dtypes__ -> m' (ControlNode) mapClear' op'options dtypes | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "MapClear" & opAttr "dtypes" .~ dtypes & op'options & opInputs .~ op'inputs) {- attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | mapIncompleteSize :: forall m' . (MonadBuild m') => [DataType] -- ^ __dtypes__ -> m' (Tensor Value Data.Int.Int32) -- ^ __size__ mapIncompleteSize = mapIncompleteSize' id mapIncompleteSize' :: forall m' . (MonadBuild m') => OpParams -> [DataType] -- ^ __dtypes__ -> m' (Tensor Value Data.Int.Int32) -- ^ __size__ mapIncompleteSize' op'options dtypes | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "MapIncompleteSize" & opAttr "dtypes" .~ dtypes & op'options & opInputs .~ op'inputs) {- output_arg { name: "size" type: DT_INT32 } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | mapPeek :: forall v'1 v'2 dtypes m' . (MonadBuild m', TensorTypes dtypes) => Tensor v'1 Data.Int.Int64 -- ^ __key__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> m' (TensorList (Value) dtypes) -- ^ __values__ mapPeek = mapPeek' id mapPeek' :: forall v'1 v'2 dtypes m' . (MonadBuild m', TensorTypes dtypes) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __key__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> m' (TensorList (Value) dtypes) -- ^ __values__ mapPeek' op'options key indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs key, buildInputs indices] buildOp [] (opDef "MapPeek" & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes) & op'options & opInputs .~ op'inputs) {- input_arg { name: "key" type: DT_INT64 } input_arg { name: "indices" type: DT_INT32 } output_arg { name: "values" type_list_attr: "dtypes" } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | mapSize :: forall m' . (MonadBuild m') => [DataType] -- ^ __dtypes__ -> m' (Tensor Value Data.Int.Int32) -- ^ __size__ mapSize = mapSize' id mapSize' :: forall m' . 
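{- Usage sketch (hand-written, not generated): the Map* staging ops share a
   container through the optional "container"/"shared_name" attrs, so
   inspecting a named staging area looks like:

     sizeOf :: MonadBuild m => [DataType] -> m (Tensor Value Data.Int.Int32)
     sizeOf = mapIncompleteSize' (opAttr "shared_name" .~ ("stage0" :: ByteString))
-}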
(MonadBuild m') => OpParams -> [DataType] -- ^ __dtypes__ -> m' (Tensor Value Data.Int.Int32) -- ^ __size__ mapSize' op'options dtypes | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "MapSize" & opAttr "dtypes" .~ dtypes & op'options & opInputs .~ op'inputs) {- output_arg { name: "size" type: DT_INT32 } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | mapStage :: forall v'1 v'2 v'3 fake_dtypes m' . (MonadBuild m', TensorTypes fake_dtypes) => [DataType] -- ^ __dtypes__ -> Tensor v'1 Data.Int.Int64 -- ^ __key__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> TensorList (v'3) fake_dtypes -- ^ __values__ -> m' (ControlNode) mapStage = mapStage' id mapStage' :: forall v'1 v'2 v'3 fake_dtypes m' . (MonadBuild m', TensorTypes fake_dtypes) => OpParams -> [DataType] -- ^ __dtypes__ -> Tensor v'1 Data.Int.Int64 -- ^ __key__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> TensorList (v'3) fake_dtypes -- ^ __values__ -> m' (ControlNode) mapStage' op'options dtypes key indices values | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs key, buildInputs indices, buildInputs values] buildOp [] (opDef "MapStage" & opAttr "fake_dtypes" .~ fromTensorTypes (Proxy :: Proxy fake_dtypes) & opAttr "dtypes" .~ dtypes & op'options & opInputs .~ op'inputs) {- input_arg { name: "key" type: DT_INT64 } input_arg { name: "indices" type: DT_INT32 } input_arg { name: "values" type_list_attr: "fake_dtypes" } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" } attr { name: "fake_dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | mapUnstage :: forall v'1 v'2 dtypes m' . (MonadBuild m', TensorTypes dtypes) => Tensor v'1 Data.Int.Int64 -- ^ __key__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> m' (TensorList (Value) dtypes) -- ^ __values__ mapUnstage = mapUnstage' id mapUnstage' :: forall v'1 v'2 dtypes m' . (MonadBuild m', TensorTypes dtypes) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __key__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> m' (TensorList (Value) dtypes) -- ^ __values__ mapUnstage' op'options key indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs key, buildInputs indices] buildOp [] (opDef "MapUnstage" & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes) & op'options & opInputs .~ op'inputs) {- input_arg { name: "key" type: DT_INT64 } input_arg { name: "indices" type: DT_INT32 } output_arg { name: "values" type_list_attr: "dtypes" } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | mapUnstageNoKey :: forall v'1 dtypes m' . 
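{- Usage sketch (hand-written, not generated): on the unstage side the
   "dtypes" attr is type-level (TensorTypes dtypes), so annotating the result
   list is what selects it.  Pulling a (Float, Int64) pair staged under `key`:

     pull :: MonadBuild m
          => Tensor v1 Data.Int.Int64 -> Tensor v2 Data.Int.Int32
          -> m (TensorList Value '[Float, Data.Int.Int64])
     pull key indices = mapUnstage key indices
-}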
(MonadBuild m', TensorTypes dtypes) => Tensor v'1 Data.Int.Int32 -- ^ __indices__ -> m' ((Tensor Value Data.Int.Int64, TensorList (Value) dtypes)) -- ^ (__key__, __values__) -- -- * __key__ -- -- * __values__ mapUnstageNoKey = mapUnstageNoKey' id mapUnstageNoKey' :: forall v'1 dtypes m' . (MonadBuild m', TensorTypes dtypes) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __indices__ -> m' ((Tensor Value Data.Int.Int64, TensorList (Value) dtypes)) -- ^ (__key__, __values__) -- -- * __key__ -- -- * __values__ mapUnstageNoKey' op'options indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices] buildOp [] (opDef "MapUnstageNoKey" & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes) & op'options & opInputs .~ op'inputs) {- input_arg { name: "indices" type: DT_INT32 } output_arg { name: "key" type: DT_INT64 } output_arg { name: "values" type_list_attr: "dtypes" } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | matMul :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __b__ -> Tensor Build t -- ^ __product__ matMul = matMul' id matMul' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __b__ -> Tensor Build t -- ^ __product__ matMul' op'options a b | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a, buildInputs b] return (opDef "MatMul" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a" type_attr: "T" } input_arg { name: "b" type_attr: "T" } output_arg { name: "product" type_attr: "T" } attr { name: "transpose_a" type: "bool" default_value { b: false } } attr { name: "transpose_b" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | matchingFiles :: Tensor v'1 Data.ByteString.ByteString -- ^ __pattern__ -> Tensor Build Data.ByteString.ByteString -- ^ __filenames__ matchingFiles = matchingFiles' id matchingFiles' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __pattern__ -> Tensor Build Data.ByteString.ByteString -- ^ __filenames__ matchingFiles' op'options pattern | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs pattern] return (opDef "MatchingFiles" & op'options & opInputs .~ op'inputs) {- input_arg { name: "pattern" type: DT_STRING } output_arg { name: "filenames" type: DT_STRING } -} -- | matrixBandPart :: forall v'1 v'2 v'3 t tindex . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindex) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 tindex -- ^ __num_lower__ -> Tensor v'3 tindex -- ^ __num_upper__ -> Tensor Build t -- ^ __band__ matrixBandPart = matrixBandPart' id matrixBandPart' :: forall v'1 v'2 v'3 t tindex . 
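{- Usage sketch (hand-written, not generated): "transpose_a"/"transpose_b"
   are optional Bool attrs, so a transposed product goes through matMul':

     gram :: Tensor v1 Float -> Tensor Build Float
     gram a = matMul' (opAttr "transpose_a" .~ True) a a
-}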
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindex) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tindex -- ^ __num_lower__ -> Tensor v'3 tindex -- ^ __num_upper__ -> Tensor Build t -- ^ __band__ matrixBandPart' op'options input num_lower num_upper | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs num_lower, buildInputs num_upper] return (opDef "MatrixBandPart" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindex" .~ tensorType (undefined :: tindex) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "num_lower" type_attr: "Tindex" } input_arg { name: "num_upper" type_attr: "Tindex" } output_arg { name: "band" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tindex" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | matrixDeterminant :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ matrixDeterminant = matrixDeterminant' id matrixDeterminant' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ matrixDeterminant' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "MatrixDeterminant" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | matrixDiag :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __diagonal__ -> Tensor Build t -- ^ __output__ matrixDiag = matrixDiag' id matrixDiag' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __diagonal__ -> Tensor Build t -- ^ __output__ matrixDiag' op'options diagonal | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs diagonal] return (opDef "MatrixDiag" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "diagonal" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } -} -- | matrixDiagPart :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __diagonal__ matrixDiagPart = matrixDiagPart' id matrixDiagPart' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __diagonal__ matrixDiagPart' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "MatrixDiagPart" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "diagonal" type_attr: "T" } attr { name: "T" type: "type" } -} -- | matrixExponential :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ matrixExponential = matrixExponential' id matrixExponential' :: forall v'1 t . 
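{- Usage sketch (hand-written, not generated): keeping only the tridiagonal
   band of each innermost matrix.  Assumes TensorFlow.Ops.scalar for the
   band-width scalars (a negative value keeps the whole triangle):

     tridiag :: TensorType t => Tensor v t -> Tensor Build t
     tridiag m = matrixBandPart m (TensorFlow.Ops.scalar (1 :: Data.Int.Int64))
                                  (TensorFlow.Ops.scalar 1)
-}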
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ matrixExponential' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "MatrixExponential" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_HALF type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | matrixInverse :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ matrixInverse = matrixInverse' id matrixInverse' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ matrixInverse' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "MatrixInverse" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "adjoint" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_HALF type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | matrixLogarithm :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ matrixLogarithm = matrixLogarithm' id matrixLogarithm' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ matrixLogarithm' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "MatrixLogarithm" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | matrixSetDiag :: forall v'1 v'2 t . (TensorType t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __diagonal__ -> Tensor Build t -- ^ __output__ matrixSetDiag = matrixSetDiag' id matrixSetDiag' :: forall v'1 v'2 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __diagonal__ -> Tensor Build t -- ^ __output__ matrixSetDiag' op'options input diagonal | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs diagonal] return (opDef "MatrixSetDiag" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "diagonal" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } -} -- | matrixSolve :: forall v'1 v'2 t . 
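{- Usage sketch (hand-written, not generated): the optional "adjoint" attr
   asks MatrixInverse for the inverse of the (conjugate-)transposed input:

     invAdj :: Tensor v Float -> Tensor Build Float
     invAdj = matrixInverse' (opAttr "adjoint" .~ True)
-}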
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __matrix__ -> Tensor v'2 t -- ^ __rhs__ -> Tensor Build t -- ^ __output__ matrixSolve = matrixSolve' id matrixSolve' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __matrix__ -> Tensor v'2 t -- ^ __rhs__ -> Tensor Build t -- ^ __output__ matrixSolve' op'options matrix rhs | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs matrix, buildInputs rhs] return (opDef "MatrixSolve" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "matrix" type_attr: "T" } input_arg { name: "rhs" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "adjoint" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_HALF type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | matrixSolveLs :: forall v'1 v'2 v'3 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __matrix__ -> Tensor v'2 t -- ^ __rhs__ -> Tensor v'3 Double -- ^ __l2_regularizer__ -> Tensor Build t -- ^ __output__ matrixSolveLs = matrixSolveLs' id matrixSolveLs' :: forall v'1 v'2 v'3 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __matrix__ -> Tensor v'2 t -- ^ __rhs__ -> Tensor v'3 Double -- ^ __l2_regularizer__ -> Tensor Build t -- ^ __output__ matrixSolveLs' op'options matrix rhs l2_regularizer | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs matrix, buildInputs rhs, buildInputs l2_regularizer] return (opDef "MatrixSolveLs" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "matrix" type_attr: "T" } input_arg { name: "rhs" type_attr: "T" } input_arg { name: "l2_regularizer" type: DT_DOUBLE } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_HALF type: DT_COMPLEX64 type: DT_COMPLEX128 } } } attr { name: "fast" type: "bool" default_value { b: true } } -} -- | matrixSquareRoot :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ matrixSquareRoot = matrixSquareRoot' id matrixSquareRoot' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ matrixSquareRoot' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "MatrixSquareRoot" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_HALF type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | matrixTriangularSolve :: forall v'1 v'2 t . 
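{- Usage sketch (hand-written, not generated): note that MatrixSolveLs takes
   its "l2_regularizer" as a Double tensor regardless of the element type t.
   Assuming TensorFlow.Ops.scalar:

     solveLs :: Tensor v1 Float -> Tensor v2 Float -> Tensor Build Float
     solveLs a b = matrixSolveLs a b (TensorFlow.Ops.scalar (1e-6 :: Double))
-}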
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __matrix__ -> Tensor v'2 t -- ^ __rhs__ -> Tensor Build t -- ^ __output__ matrixTriangularSolve = matrixTriangularSolve' id matrixTriangularSolve' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __matrix__ -> Tensor v'2 t -- ^ __rhs__ -> Tensor Build t -- ^ __output__ matrixTriangularSolve' op'options matrix rhs | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs matrix, buildInputs rhs] return (opDef "MatrixTriangularSolve" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "matrix" type_attr: "T" } input_arg { name: "rhs" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "lower" type: "bool" default_value { b: true } } attr { name: "adjoint" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_HALF type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | max :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 tidx -- ^ __reduction_indices__ -> Tensor Build t -- ^ __output__ max = max' id max' :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tidx -- ^ __reduction_indices__ -> Tensor Build t -- ^ __output__ max' op'options input reduction_indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs reduction_indices] return (opDef "Max" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "reduction_indices" type_attr: "Tidx" } output_arg { name: "output" type_attr: "T" } attr { name: "keep_dims" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | maxPool :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ maxPool = maxPool' id maxPool' :: forall v'1 t . 
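{- Usage sketch (hand-written, not generated): Max reduces over the given
   axes; "keep_dims" is an optional attr, and the binding shadows Prelude.max.
   Row-wise maximum of a rank-2 tensor, keeping the reduced axis:

     rowMax :: Tensor v Float -> Tensor Build Float
     rowMax x = max' (opAttr "keep_dims" .~ True)
                     x (TensorFlow.Ops.scalar (1 :: Data.Int.Int32))
-}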
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ maxPool' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "MaxPool" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_QINT8 } } } attr { name: "ksize" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "strides" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" s: "NCHW_VECT_C" } } } -} -- | maxPool3D :: forall v'1 t . (OneOf '[Data.Word.Word16, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ maxPool3D = maxPool3D' id maxPool3D' :: forall v'1 t . (OneOf '[Data.Word.Word16, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ maxPool3D' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "MaxPool3D" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "ksize" type: "list(int)" has_minimum: true minimum: 5 } attr { name: "strides" type: "list(int)" has_minimum: true minimum: 5 } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NDHWC" } allowed_values { list { s: "NDHWC" s: "NCDHW" } } } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT } } } -} -- | maxPool3DGrad :: forall v'1 v'2 v'3 t tInput . (OneOf '[Data.Word.Word16, Float] t, OneOf '[Data.Word.Word16, Float] tInput) => Tensor v'1 tInput -- ^ __orig_input__ -> Tensor v'2 tInput -- ^ __orig_output__ -> Tensor v'3 t -- ^ __grad__ -> Tensor Build t -- ^ __output__ maxPool3DGrad = maxPool3DGrad' id maxPool3DGrad' :: forall v'1 v'2 v'3 t tInput . 
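{- Usage sketch (hand-written, not generated): MaxPool's "ksize", "strides"
   and "padding" attrs have no defaults, so they must all be supplied through
   OpParams (composed with (.), since OpParams is just OpDef -> OpDef).
   Assumes Attribute instances for [Int64] and ByteString:

     pooled :: Tensor v Float -> Tensor Build Float
     pooled = maxPool'
       ( (opAttr "ksize"   .~ ([1, 2, 2, 1] :: [Data.Int.Int64]))
       . (opAttr "strides" .~ ([1, 2, 2, 1] :: [Data.Int.Int64]))
       . (opAttr "padding" .~ ("VALID" :: ByteString)) )
-}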
(OneOf '[Data.Word.Word16, Float] t, OneOf '[Data.Word.Word16, Float] tInput) => OpParams -> Tensor v'1 tInput -- ^ __orig_input__ -> Tensor v'2 tInput -- ^ __orig_output__ -> Tensor v'3 t -- ^ __grad__ -> Tensor Build t -- ^ __output__ maxPool3DGrad' op'options orig_input orig_output grad | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs orig_input, buildInputs orig_output, buildInputs grad] return (opDef "MaxPool3DGrad" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "TInput" .~ tensorType (undefined :: tInput) & op'options & opInputs .~ op'inputs) {- input_arg { name: "orig_input" type_attr: "TInput" } input_arg { name: "orig_output" type_attr: "TInput" } input_arg { name: "grad" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "ksize" type: "list(int)" has_minimum: true minimum: 5 } attr { name: "strides" type: "list(int)" has_minimum: true minimum: 5 } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NDHWC" } allowed_values { list { s: "NDHWC" s: "NCDHW" } } } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT } } } attr { name: "TInput" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT } } } -} -- | maxPool3DGradGrad :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __orig_input__ -> Tensor v'2 t -- ^ __orig_output__ -> Tensor v'3 t -- ^ __grad__ -> Tensor Build t -- ^ __output__ maxPool3DGradGrad = maxPool3DGradGrad' id maxPool3DGradGrad' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __orig_input__ -> Tensor v'2 t -- ^ __orig_output__ -> Tensor v'3 t -- ^ __grad__ -> Tensor Build t -- ^ __output__ maxPool3DGradGrad' op'options orig_input orig_output grad | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs orig_input, buildInputs orig_output, buildInputs grad] return (opDef "MaxPool3DGradGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "orig_input" type_attr: "T" } input_arg { name: "orig_output" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "ksize" type: "list(int)" has_minimum: true minimum: 5 } attr { name: "strides" type: "list(int)" has_minimum: true minimum: 5 } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NDHWC" } allowed_values { list { s: "NDHWC" s: "NCDHW" } } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | maxPoolGrad :: forall v'1 v'2 v'3 t . 
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __orig_input__ -> Tensor v'2 t -- ^ __orig_output__ -> Tensor v'3 t -- ^ __grad__ -> Tensor Build t -- ^ __output__ maxPoolGrad = maxPoolGrad' id maxPoolGrad' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __orig_input__ -> Tensor v'2 t -- ^ __orig_output__ -> Tensor v'3 t -- ^ __grad__ -> Tensor Build t -- ^ __output__ maxPoolGrad' op'options orig_input orig_output grad | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs orig_input, buildInputs orig_output, buildInputs grad] return (opDef "MaxPoolGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "orig_input" type_attr: "T" } input_arg { name: "orig_output" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "ksize" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "strides" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | maxPoolGradGrad :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __orig_input__ -> Tensor v'2 t -- ^ __orig_output__ -> Tensor v'3 t -- ^ __grad__ -> Tensor Build t -- ^ __output__ maxPoolGradGrad = maxPoolGradGrad' id maxPoolGradGrad' :: forall v'1 v'2 v'3 t . 
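-- NOTE: the "MaxPool*Grad*" ops take the forward op's input, its output, and
-- the incoming gradient, and they must be configured with the same
-- "ksize"/"strides"/"padding" attributes as the forward pass. A sketch, where
-- the 'poolAttrs :: OpParams' parameter stands for the attribute settings
-- shown in the note above:
--
-- > poolBackprop :: OpParams
-- >              -> Tensor v'1 Float -> Tensor v'2 Float -> Tensor v'3 Float
-- >              -> Tensor Build Float
-- > poolBackprop poolAttrs x y dy = maxPoolGrad' poolAttrs x y dy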
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __orig_input__ -> Tensor v'2 t -- ^ __orig_output__ -> Tensor v'3 t -- ^ __grad__ -> Tensor Build t -- ^ __output__ maxPoolGradGrad' op'options orig_input orig_output grad | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs orig_input, buildInputs orig_output, buildInputs grad] return (opDef "MaxPoolGradGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "orig_input" type_attr: "T" } input_arg { name: "orig_output" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "ksize" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "strides" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | maxPoolGradGradV2 :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __orig_input__ -> Tensor v'2 t -- ^ __orig_output__ -> Tensor v'3 t -- ^ __grad__ -> Tensor v'4 Data.Int.Int32 -- ^ __ksize__ -> Tensor v'5 Data.Int.Int32 -- ^ __strides__ -> Tensor Build t -- ^ __output__ maxPoolGradGradV2 = maxPoolGradGradV2' id maxPoolGradGradV2' :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __orig_input__ -> Tensor v'2 t -- ^ __orig_output__ -> Tensor v'3 t -- ^ __grad__ -> Tensor v'4 Data.Int.Int32 -- ^ __ksize__ -> Tensor v'5 Data.Int.Int32 -- ^ __strides__ -> Tensor Build t -- ^ __output__ maxPoolGradGradV2' op'options orig_input orig_output grad ksize strides | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs orig_input, buildInputs orig_output, buildInputs grad, buildInputs ksize, buildInputs strides] return (opDef "MaxPoolGradGradV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "orig_input" type_attr: "T" } input_arg { name: "orig_output" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } input_arg { name: "ksize" type: DT_INT32 } input_arg { name: "strides" type: DT_INT32 } output_arg { name: "output" type_attr: "T" } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | maxPoolGradGradWithArgmax :: forall v'1 v'2 v'3 targmax t . 
(OneOf '[Data.Int.Int32, Data.Int.Int64] targmax, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __grad__ -> Tensor v'3 targmax -- ^ __argmax__ -> Tensor Build t -- ^ __output__ maxPoolGradGradWithArgmax = maxPoolGradGradWithArgmax' id maxPoolGradGradWithArgmax' :: forall v'1 v'2 v'3 targmax t . (OneOf '[Data.Int.Int32, Data.Int.Int64] targmax, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __grad__ -> Tensor v'3 targmax -- ^ __argmax__ -> Tensor Build t -- ^ __output__ maxPoolGradGradWithArgmax' op'options input grad argmax | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs grad, buildInputs argmax] return (opDef "MaxPoolGradGradWithArgmax" & opAttr "Targmax" .~ tensorType (undefined :: targmax) & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } input_arg { name: "argmax" type_attr: "Targmax" } output_arg { name: "output" type_attr: "T" } attr { name: "ksize" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "strides" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "include_batch_in_index" type: "bool" default_value { b: false } } attr { name: "Targmax" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | maxPoolGradV2 :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __orig_input__ -> Tensor v'2 t -- ^ __orig_output__ -> Tensor v'3 t -- ^ __grad__ -> Tensor v'4 Data.Int.Int32 -- ^ __ksize__ -> Tensor v'5 Data.Int.Int32 -- ^ __strides__ -> Tensor Build t -- ^ __output__ maxPoolGradV2 = maxPoolGradV2' id maxPoolGradV2' :: forall v'1 v'2 v'3 v'4 v'5 t . 
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __orig_input__ -> Tensor v'2 t -- ^ __orig_output__ -> Tensor v'3 t -- ^ __grad__ -> Tensor v'4 Data.Int.Int32 -- ^ __ksize__ -> Tensor v'5 Data.Int.Int32 -- ^ __strides__ -> Tensor Build t -- ^ __output__ maxPoolGradV2' op'options orig_input orig_output grad ksize strides | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs orig_input, buildInputs orig_output, buildInputs grad, buildInputs ksize, buildInputs strides] return (opDef "MaxPoolGradV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "orig_input" type_attr: "T" } input_arg { name: "orig_output" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } input_arg { name: "ksize" type: DT_INT32 } input_arg { name: "strides" type: DT_INT32 } output_arg { name: "output" type_attr: "T" } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | maxPoolGradWithArgmax :: forall v'1 v'2 v'3 targmax t . (OneOf '[Data.Int.Int32, Data.Int.Int64] targmax, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __grad__ -> Tensor v'3 targmax -- ^ __argmax__ -> Tensor Build t -- ^ __output__ maxPoolGradWithArgmax = maxPoolGradWithArgmax' id maxPoolGradWithArgmax' :: forall v'1 v'2 v'3 targmax t . 
(OneOf '[Data.Int.Int32, Data.Int.Int64] targmax, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __grad__ -> Tensor v'3 targmax -- ^ __argmax__ -> Tensor Build t -- ^ __output__ maxPoolGradWithArgmax' op'options input grad argmax | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs grad, buildInputs argmax] return (opDef "MaxPoolGradWithArgmax" & opAttr "Targmax" .~ tensorType (undefined :: targmax) & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } input_arg { name: "argmax" type_attr: "Targmax" } output_arg { name: "output" type_attr: "T" } attr { name: "ksize" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "strides" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "include_batch_in_index" type: "bool" default_value { b: false } } attr { name: "Targmax" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | maxPoolV2 :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __ksize__ -> Tensor v'3 Data.Int.Int32 -- ^ __strides__ -> Tensor Build t -- ^ __output__ maxPoolV2 = maxPoolV2' id maxPoolV2' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __ksize__ -> Tensor v'3 Data.Int.Int32 -- ^ __strides__ -> Tensor Build t -- ^ __output__ maxPoolV2' op'options input ksize strides | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs ksize, buildInputs strides] return (opDef "MaxPoolV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "ksize" type: DT_INT32 } input_arg { name: "strides" type: DT_INT32 } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_UINT16 type: DT_QINT8 } } } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" s: "NCHW_VECT_C" } } } -} -- | maxPoolWithArgmax :: forall v'1 targmax t . 
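-- NOTE: the "V2" pooling variants take "ksize" and "strides" as runtime Int32
-- tensors rather than attributes; only "padding" (and optionally
-- "data_format") remain attributes. A sketch, assuming a constant-building
-- helper such as 'vector' from "TensorFlow.Ops" (not part of this module):
--
-- > poolV2 :: Tensor v'1 Float -> Tensor Build Float
-- > poolV2 x = maxPoolV2' (opAttr "padding" .~ ("SAME" :: ByteString)) x ks ss
-- >   where ks = TensorFlow.Ops.vector ([1, 2, 2, 1] :: [Data.Int.Int32])
-- >         ss = TensorFlow.Ops.vector ([1, 2, 2, 1] :: [Data.Int.Int32])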
(OneOf '[Data.Int.Int32, Data.Int.Int64] targmax, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> (Tensor Build t, Tensor Build targmax) -- ^ (__output__, __argmax__) -- -- * __output__ -- -- * __argmax__ maxPoolWithArgmax = maxPoolWithArgmax' id maxPoolWithArgmax' :: forall v'1 targmax t . (OneOf '[Data.Int.Int32, Data.Int.Int64] targmax, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> (Tensor Build t, Tensor Build targmax) -- ^ (__output__, __argmax__) -- -- * __output__ -- -- * __argmax__ maxPoolWithArgmax' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "MaxPoolWithArgmax" & opAttr "Targmax" .~ tensorType (undefined :: targmax) & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } output_arg { name: "argmax" type_attr: "Targmax" } attr { name: "ksize" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "strides" type: "list(int)" has_minimum: true minimum: 4 } attr { name: "Targmax" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "include_batch_in_index" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | maximum :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ maximum = maximum' id maximum' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ maximum' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "Maximum" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } -} -- | mean :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 tidx -- ^ __reduction_indices__ -> Tensor Build t -- ^ __output__ mean = mean' id mean' :: forall v'1 v'2 t tidx . 
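-- NOTE: 'maxPoolWithArgmax' builds a single graph node with two outputs, so
-- both components of the returned pair come from one op. "Targmax" defaults
-- to DT_INT64 in the op definition; here the result annotation picks Int64
-- for the indices. 'poolAttrs' again stands for the required
-- "ksize"/"strides"/"padding" settings:
--
-- > poolAndIndices :: OpParams -> Tensor v'1 Float
-- >                -> (Tensor Build Float, Tensor Build Data.Int.Int64)
-- > poolAndIndices poolAttrs x = maxPoolWithArgmax' poolAttrs x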
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tidx -- ^ __reduction_indices__ -> Tensor Build t -- ^ __output__ mean' op'options input reduction_indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs reduction_indices] return (opDef "Mean" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "reduction_indices" type_attr: "Tidx" } output_arg { name: "output" type_attr: "T" } attr { name: "keep_dims" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | merge :: forall v'1 t . (TensorType t) => [Tensor v'1 t] -- ^ __inputs__ -> (Tensor Build t, Tensor Build Data.Int.Int32) -- ^ (__output__, __value_index__) -- -- * __output__ -- -- * __value_index__ merge = merge' id merge' :: forall v'1 t . (TensorType t) => OpParams -> [Tensor v'1 t] -- ^ __inputs__ -> (Tensor Build t, Tensor Build Data.Int.Int32) -- ^ (__output__, __value_index__) -- -- * __output__ -- -- * __value_index__ merge' op'options inputs | eqLengthGuard [("N", [("inputs", length inputs)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs] return (opDef "Merge" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length inputs) :: Int64 {- input_arg { name: "inputs" type_attr: "T" number_attr: "N" } output_arg { name: "output" type_attr: "T" } output_arg { name: "value_index" type: DT_INT32 } attr { name: "T" type: "type" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } -} -- | mergeSummary :: [Tensor v'1 Data.ByteString.ByteString] -- ^ __inputs__ -> Tensor Build Data.ByteString.ByteString -- ^ __summary__ mergeSummary = mergeSummary' id mergeSummary' :: OpParams -> [Tensor v'1 Data.ByteString.ByteString] -- ^ __inputs__ -> Tensor Build Data.ByteString.ByteString -- ^ __summary__ mergeSummary' op'options inputs | eqLengthGuard [("N", [("inputs", length inputs)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs] return (opDef "MergeSummary" & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length inputs) :: Int64 {- input_arg { name: "inputs" type: DT_STRING number_attr: "N" } output_arg { name: "summary" type: DT_STRING } attr { name: "N" type: "int" has_minimum: true minimum: 1 } -} -- | mergeV2Checkpoints :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __checkpoint_prefixes__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __destination_prefix__ -> m' (ControlNode) mergeV2Checkpoints = mergeV2Checkpoints' id mergeV2Checkpoints' :: forall v'1 v'2 m' . 
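-- NOTE: the reduction ops ("Mean" above, "Min" below) drop the reduced axes
-- unless the "keep_dims" attribute (default: false) is enabled. A sketch
-- using the 'Bool' 'Attribute' instance:
--
-- > meanKeepDims :: Tensor v'1 Float -> Tensor v'2 Data.Int.Int32
-- >              -> Tensor Build Float
-- > meanKeepDims xs axes = mean' (opAttr "keep_dims" .~ True) xs axes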
(MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __checkpoint_prefixes__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __destination_prefix__ -> m' (ControlNode) mergeV2Checkpoints' op'options checkpoint_prefixes destination_prefix | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs checkpoint_prefixes, buildInputs destination_prefix] buildOp [] (opDef "MergeV2Checkpoints" & op'options & opInputs .~ op'inputs) {- input_arg { name: "checkpoint_prefixes" type: DT_STRING } input_arg { name: "destination_prefix" type: DT_STRING } attr { name: "delete_old_dirs" type: "bool" default_value { b: true } } -} -- | mfcc :: Tensor v'1 Float -- ^ __spectrogram__ -> Tensor v'2 Data.Int.Int32 -- ^ __sample_rate__ -> Tensor Build Float -- ^ __output__ mfcc = mfcc' id mfcc' :: OpParams -> Tensor v'1 Float -- ^ __spectrogram__ -> Tensor v'2 Data.Int.Int32 -- ^ __sample_rate__ -> Tensor Build Float -- ^ __output__ mfcc' op'options spectrogram sample_rate | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs spectrogram, buildInputs sample_rate] return (opDef "Mfcc" & op'options & opInputs .~ op'inputs) {- input_arg { name: "spectrogram" type: DT_FLOAT } input_arg { name: "sample_rate" type: DT_INT32 } output_arg { name: "output" type: DT_FLOAT } attr { name: "upper_frequency_limit" type: "float" default_value { f: 4000.0 } } attr { name: "lower_frequency_limit" type: "float" default_value { f: 20.0 } } attr { name: "filterbank_channel_count" type: "int" default_value { i: 40 } } attr { name: "dct_coefficient_count" type: "int" default_value { i: 13 } } -} -- | min :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 tidx -- ^ __reduction_indices__ -> Tensor Build t -- ^ __output__ min = min' id min' :: forall v'1 v'2 t tidx . 
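-- NOTE: list-valued inputs such as those of 'merge' and 'mergeSummary' infer
-- the "N" attribute from the length of the Haskell list, and 'eqLengthGuard'
-- checks that every list mapped to the same attribute has a consistent
-- length, so nothing needs to be set by hand:
--
-- > allSummaries :: [Tensor v'1 Data.ByteString.ByteString]
-- >              -> Tensor Build Data.ByteString.ByteString
-- > allSummaries = mergeSummary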
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tidx -- ^ __reduction_indices__ -> Tensor Build t -- ^ __output__ min' op'options input reduction_indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs reduction_indices] return (opDef "Min" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "reduction_indices" type_attr: "Tidx" } output_arg { name: "output" type_attr: "T" } attr { name: "keep_dims" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | minimum :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ minimum = minimum' id minimum' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ minimum' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "Minimum" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } -} -- | mirrorPad :: forall v'1 v'2 t tpaddings . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tpaddings) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 tpaddings -- ^ __paddings__ -> Tensor Build t -- ^ __output__ mirrorPad = mirrorPad' id mirrorPad' :: forall v'1 v'2 t tpaddings . 
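-- NOTE: "MirrorPad" (and "MirrorPadGrad") have no default for their "mode"
-- attribute; it must be set to "REFLECT" or "SYMMETRIC". A sketch:
--
-- > reflectPad :: Tensor v'1 Float -> Tensor v'2 Data.Int.Int32
-- >            -> Tensor Build Float
-- > reflectPad x p = mirrorPad' (opAttr "mode" .~ ("REFLECT" :: ByteString)) x p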
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tpaddings) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tpaddings -- ^ __paddings__ -> Tensor Build t -- ^ __output__ mirrorPad' op'options input paddings | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs paddings] return (opDef "MirrorPad" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "paddings" type_attr: "Tpaddings" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tpaddings" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "mode" type: "string" allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } } } -} -- | mirrorPadGrad :: forall v'1 v'2 t tpaddings . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tpaddings) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 tpaddings -- ^ __paddings__ -> Tensor Build t -- ^ __output__ mirrorPadGrad = mirrorPadGrad' id mirrorPadGrad' :: forall v'1 v'2 t tpaddings . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tpaddings) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tpaddings -- ^ __paddings__ -> Tensor Build t -- ^ __output__ mirrorPadGrad' op'options input paddings | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs paddings] return (opDef "MirrorPadGrad" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "paddings" type_attr: "Tpaddings" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tpaddings" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "mode" type: "string" allowed_values { list { s: "REFLECT" s: "SYMMETRIC" } } } -} -- | mod :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ mod = mod' id mod' :: forall v'1 v'2 t . 
(OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ mod' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "Mod" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 type: DT_HALF type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | modelDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor Build Variant -- ^ __handle__ modelDataset = modelDataset' id modelDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor Build Variant -- ^ __handle__ modelDataset' op'options output_types input_dataset | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset] return (opDef "ModelDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } output_arg { name: "handle" type: DT_VARIANT } attr { name: "cpu_budget" type: "int" default_value { i: 0 } } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | mul :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ mul = mul' id mul' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ mul' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "Mul" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_UINT16 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | mulNoNan :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ mulNoNan = mulNoNan' id mulNoNan' :: forall v'1 v'2 t . 
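-- NOTE: 'modelDataset' exposes only "output_types" as an explicit argument;
-- the op definition also requires "output_shapes" (minimum 1, no default),
-- which has to travel through 'OpParams'. A sketch, assuming an 'Attribute'
-- instance for '[Shape]' is available and that the 'DataType' constructors
-- (e.g. 'DT_FLOAT') are in scope:
--
-- > modeled :: Tensor v'1 Variant -> Tensor Build Variant
-- > modeled ds = modelDataset' (opAttr "output_shapes" .~ [Shape []])
-- >                            [DT_FLOAT] ds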
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ mulNoNan' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "MulNoNan" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | multiDeviceIterator :: forall m' . (MonadBuild m') => [DataType] -- ^ __output_types__ -> m' (Tensor Value ResourceHandle) -- ^ __handle__ multiDeviceIterator = multiDeviceIterator' id multiDeviceIterator' :: forall m' . (MonadBuild m') => OpParams -> [DataType] -- ^ __output_types__ -> m' (Tensor Value ResourceHandle) -- ^ __handle__ multiDeviceIterator' op'options output_types | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "MultiDeviceIterator" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" type: DT_RESOURCE } attr { name: "devices" type: "list(string)" has_minimum: true minimum: 1 } attr { name: "shared_name" type: "string" } attr { name: "container" type: "string" } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | multiDeviceIteratorFromStringHandle :: forall v'1 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __string_handle__ -> m' (Tensor Value ResourceHandle) -- ^ __multi_device_iterator__ multiDeviceIteratorFromStringHandle = multiDeviceIteratorFromStringHandle' id multiDeviceIteratorFromStringHandle' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __string_handle__ -> m' (Tensor Value ResourceHandle) -- ^ __multi_device_iterator__ multiDeviceIteratorFromStringHandle' op'options string_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs string_handle] buildOp [] (opDef "MultiDeviceIteratorFromStringHandle" & op'options & opInputs .~ op'inputs) {- input_arg { name: "string_handle" type: DT_STRING } output_arg { name: "multi_device_iterator" type: DT_RESOURCE } attr { name: "output_types" type: "list(type)" default_value { list { } } has_minimum: true } attr { name: "output_shapes" type: "list(shape)" default_value { list { } } has_minimum: true } -} -- | multiDeviceIteratorGetNextFromShard :: forall v'1 v'2 v'3 output_types m' . (MonadBuild m', TensorTypes output_types) => Tensor v'1 ResourceHandle -- ^ __multi_device_iterator__ -> Tensor v'2 Data.Int.Int32 -- ^ __shard_num__ -> Tensor v'3 Data.Int.Int64 -- ^ __incarnation_id__ -> m' (TensorList (Value) output_types) -- ^ __components__ multiDeviceIteratorGetNextFromShard = multiDeviceIteratorGetNextFromShard' id multiDeviceIteratorGetNextFromShard' :: forall v'1 v'2 v'3 output_types m' . 
(MonadBuild m', TensorTypes output_types) => OpParams -> Tensor v'1 ResourceHandle -- ^ __multi_device_iterator__ -> Tensor v'2 Data.Int.Int32 -- ^ __shard_num__ -> Tensor v'3 Data.Int.Int64 -- ^ __incarnation_id__ -> m' (TensorList (Value) output_types) -- ^ __components__ multiDeviceIteratorGetNextFromShard' op'options multi_device_iterator shard_num incarnation_id | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs multi_device_iterator, buildInputs shard_num, buildInputs incarnation_id] buildOp [] (opDef "MultiDeviceIteratorGetNextFromShard" & opAttr "output_types" .~ fromTensorTypes (Proxy :: Proxy output_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "multi_device_iterator" type: DT_RESOURCE } input_arg { name: "shard_num" type: DT_INT32 } input_arg { name: "incarnation_id" type: DT_INT64 } output_arg { name: "components" type_list_attr: "output_types" } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | multiDeviceIteratorInit :: forall v'1 v'2 v'3 m' . (MonadBuild m') => Tensor v'1 Variant -- ^ __dataset__ -> Tensor v'2 ResourceHandle -- ^ __multi_device_iterator__ -> Tensor v'3 Data.Int.Int64 -- ^ __max_buffer_size__ -> m' (Tensor Value Data.Int.Int64) -- ^ __incarnation_id__ multiDeviceIteratorInit = multiDeviceIteratorInit' id multiDeviceIteratorInit' :: forall v'1 v'2 v'3 m' . (MonadBuild m') => OpParams -> Tensor v'1 Variant -- ^ __dataset__ -> Tensor v'2 ResourceHandle -- ^ __multi_device_iterator__ -> Tensor v'3 Data.Int.Int64 -- ^ __max_buffer_size__ -> m' (Tensor Value Data.Int.Int64) -- ^ __incarnation_id__ multiDeviceIteratorInit' op'options dataset multi_device_iterator max_buffer_size | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs dataset, buildInputs multi_device_iterator, buildInputs max_buffer_size] buildOp [] (opDef "MultiDeviceIteratorInit" & op'options & opInputs .~ op'inputs) {- input_arg { name: "dataset" type: DT_VARIANT } input_arg { name: "multi_device_iterator" type: DT_RESOURCE } input_arg { name: "max_buffer_size" type: DT_INT64 } output_arg { name: "incarnation_id" type: DT_INT64 } -} -- | multiDeviceIteratorToStringHandle :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __multi_device_iterator__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __string_handle__ multiDeviceIteratorToStringHandle = multiDeviceIteratorToStringHandle' id multiDeviceIteratorToStringHandle' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __multi_device_iterator__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __string_handle__ multiDeviceIteratorToStringHandle' op'options multi_device_iterator | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs multi_device_iterator] buildOp [] (opDef "MultiDeviceIteratorToStringHandle" & op'options & opInputs .~ op'inputs) {- input_arg { name: "multi_device_iterator" type: DT_RESOURCE } output_arg { name: "string_handle" type: DT_STRING } -} -- | multinomial :: forall v'1 v'2 t output_dtype m' . 
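-- NOTE: stateful samplers such as 'multinomial' live in 'MonadBuild' and
-- return rendered 'Value' tensors; determinism can be requested through the
-- "seed"/"seed2" attributes (both default to 0). A sketch:
--
-- > draw :: MonadBuild m => Tensor v'1 Float -> Tensor v'2 Data.Int.Int32
-- >      -> m (Tensor Value Data.Int.Int64)
-- > draw logits n = multinomial' (opAttr "seed" .~ (42 :: Data.Int.Int64))
-- >                              logits n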
(MonadBuild m', OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] output_dtype) => Tensor v'1 t -- ^ __logits__ -> Tensor v'2 Data.Int.Int32 -- ^ __num_samples__ -> m' (Tensor Value output_dtype) -- ^ __output__ multinomial = multinomial' id multinomial' :: forall v'1 v'2 t output_dtype m' . (MonadBuild m', OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] output_dtype) => OpParams -> Tensor v'1 t -- ^ __logits__ -> Tensor v'2 Data.Int.Int32 -- ^ __num_samples__ -> m' (Tensor Value output_dtype) -- ^ __output__ multinomial' op'options logits num_samples | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs logits, buildInputs num_samples] buildOp [] (opDef "Multinomial" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "output_dtype" .~ tensorType (undefined :: output_dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "logits" type_attr: "T" } input_arg { name: "num_samples" type: DT_INT32 } output_arg { name: "output" type_attr: "output_dtype" } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "output_dtype" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | mutableDenseHashTable :: forall v'1 key_dtype m' . (MonadBuild m', TensorType key_dtype) => DataType -- ^ __value_dtype__ -> Tensor v'1 key_dtype -- ^ __empty_key__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __table_handle__ mutableDenseHashTable = mutableDenseHashTable' id mutableDenseHashTable' :: forall v'1 key_dtype m' . (MonadBuild m', TensorType key_dtype) => OpParams -> DataType -- ^ __value_dtype__ -> Tensor v'1 key_dtype -- ^ __empty_key__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __table_handle__ mutableDenseHashTable' op'options value_dtype empty_key | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs empty_key] buildOp [] (opDef "MutableDenseHashTable" & opAttr "key_dtype" .~ tensorType (undefined :: key_dtype) & opAttr "value_dtype" .~ value_dtype & op'options & opInputs .~ op'inputs) {- input_arg { name: "empty_key" type_attr: "key_dtype" } output_arg { name: "table_handle" type: DT_STRING is_ref: true } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } attr { name: "use_node_name_sharing" type: "bool" default_value { b: false } } attr { name: "key_dtype" type: "type" } attr { name: "value_dtype" type: "type" } attr { name: "value_shape" type: "shape" default_value { shape { } } } attr { name: "initial_num_buckets" type: "int" default_value { i: 131072 } } attr { name: "max_load_factor" type: "float" default_value { f: 0.8 } } -} -- | mutableDenseHashTableV2 :: forall v'1 v'2 key_dtype m' . 
(MonadBuild m', TensorType key_dtype) => DataType -- ^ __value_dtype__ -> Tensor v'1 key_dtype -- ^ __empty_key__ -> Tensor v'2 key_dtype -- ^ __deleted_key__ -> m' (Tensor Value ResourceHandle) -- ^ __table_handle__ mutableDenseHashTableV2 = mutableDenseHashTableV2' id mutableDenseHashTableV2' :: forall v'1 v'2 key_dtype m' . (MonadBuild m', TensorType key_dtype) => OpParams -> DataType -- ^ __value_dtype__ -> Tensor v'1 key_dtype -- ^ __empty_key__ -> Tensor v'2 key_dtype -- ^ __deleted_key__ -> m' (Tensor Value ResourceHandle) -- ^ __table_handle__ mutableDenseHashTableV2' op'options value_dtype empty_key deleted_key | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs empty_key, buildInputs deleted_key] buildOp [] (opDef "MutableDenseHashTableV2" & opAttr "key_dtype" .~ tensorType (undefined :: key_dtype) & opAttr "value_dtype" .~ value_dtype & op'options & opInputs .~ op'inputs) {- input_arg { name: "empty_key" type_attr: "key_dtype" } input_arg { name: "deleted_key" type_attr: "key_dtype" } output_arg { name: "table_handle" type: DT_RESOURCE } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } attr { name: "use_node_name_sharing" type: "bool" default_value { b: false } } attr { name: "key_dtype" type: "type" } attr { name: "value_dtype" type: "type" } attr { name: "value_shape" type: "shape" default_value { shape { } } } attr { name: "initial_num_buckets" type: "int" default_value { i: 131072 } } attr { name: "max_load_factor" type: "float" default_value { f: 0.8 } } -} -- | mutableHashTable :: forall m' . (MonadBuild m') => DataType -- ^ __key_dtype__ -> DataType -- ^ __value_dtype__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __table_handle__ mutableHashTable = mutableHashTable' id mutableHashTable' :: forall m' . (MonadBuild m') => OpParams -> DataType -- ^ __key_dtype__ -> DataType -- ^ __value_dtype__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __table_handle__ mutableHashTable' op'options key_dtype value_dtype | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "MutableHashTable" & opAttr "key_dtype" .~ key_dtype & opAttr "value_dtype" .~ value_dtype & op'options & opInputs .~ op'inputs) {- output_arg { name: "table_handle" type: DT_STRING is_ref: true } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } attr { name: "use_node_name_sharing" type: "bool" default_value { b: false } } attr { name: "key_dtype" type: "type" } attr { name: "value_dtype" type: "type" } -} -- | mutableHashTableOfTensors :: forall m' . (MonadBuild m') => DataType -- ^ __key_dtype__ -> DataType -- ^ __value_dtype__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __table_handle__ mutableHashTableOfTensors = mutableHashTableOfTensors' id mutableHashTableOfTensors' :: forall m' . 
(MonadBuild m') => OpParams -> DataType -- ^ __key_dtype__ -> DataType -- ^ __value_dtype__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __table_handle__ mutableHashTableOfTensors' op'options key_dtype value_dtype | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "MutableHashTableOfTensors" & opAttr "key_dtype" .~ key_dtype & opAttr "value_dtype" .~ value_dtype & op'options & opInputs .~ op'inputs) {- output_arg { name: "table_handle" type: DT_STRING is_ref: true } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } attr { name: "use_node_name_sharing" type: "bool" default_value { b: false } } attr { name: "key_dtype" type: "type" } attr { name: "value_dtype" type: "type" } attr { name: "value_shape" type: "shape" default_value { shape { } } } -} -- | mutableHashTableOfTensorsV2 :: forall m' . (MonadBuild m') => DataType -- ^ __key_dtype__ -> DataType -- ^ __value_dtype__ -> m' (Tensor Value ResourceHandle) -- ^ __table_handle__ mutableHashTableOfTensorsV2 = mutableHashTableOfTensorsV2' id mutableHashTableOfTensorsV2' :: forall m' . (MonadBuild m') => OpParams -> DataType -- ^ __key_dtype__ -> DataType -- ^ __value_dtype__ -> m' (Tensor Value ResourceHandle) -- ^ __table_handle__ mutableHashTableOfTensorsV2' op'options key_dtype value_dtype | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "MutableHashTableOfTensorsV2" & opAttr "key_dtype" .~ key_dtype & opAttr "value_dtype" .~ value_dtype & op'options & opInputs .~ op'inputs) {- output_arg { name: "table_handle" type: DT_RESOURCE } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } attr { name: "use_node_name_sharing" type: "bool" default_value { b: false } } attr { name: "key_dtype" type: "type" } attr { name: "value_dtype" type: "type" } attr { name: "value_shape" type: "shape" default_value { shape { } } } -} -- | mutableHashTableV2 :: forall m' . (MonadBuild m') => DataType -- ^ __key_dtype__ -> DataType -- ^ __value_dtype__ -> m' (Tensor Value ResourceHandle) -- ^ __table_handle__ mutableHashTableV2 = mutableHashTableV2' id mutableHashTableV2' :: forall m' . (MonadBuild m') => OpParams -> DataType -- ^ __key_dtype__ -> DataType -- ^ __value_dtype__ -> m' (Tensor Value ResourceHandle) -- ^ __table_handle__ mutableHashTableV2' op'options key_dtype value_dtype | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "MutableHashTableV2" & opAttr "key_dtype" .~ key_dtype & opAttr "value_dtype" .~ value_dtype & op'options & opInputs .~ op'inputs) {- output_arg { name: "table_handle" type: DT_RESOURCE } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } attr { name: "use_node_name_sharing" type: "bool" default_value { b: false } } attr { name: "key_dtype" type: "type" } attr { name: "value_dtype" type: "type" } -} -- | mutexLock :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __mutex__ -> m' (Tensor Value Variant) -- ^ __mutex_lock__ mutexLock = mutexLock' id mutexLock' :: forall v'1 m' . 
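-- NOTE: the hash-table constructors take key/value 'DataType's as ordinary
-- arguments and return a table handle in 'MonadBuild'; "container" and
-- "shared_name" default to "". A sketch, assuming the 'DT_*' constructors are
-- in scope:
--
-- > newTable :: MonadBuild m => m (Tensor Value ResourceHandle)
-- > newTable = mutableHashTableV2'
-- >                (opAttr "shared_name" .~ ("tbl0" :: ByteString))
-- >                DT_STRING DT_FLOAT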
(MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __mutex__ -> m' (Tensor Value Variant) -- ^ __mutex_lock__ mutexLock' op'options mutex | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs mutex] buildOp [] (opDef "MutexLock" & op'options & opInputs .~ op'inputs) {- input_arg { name: "mutex" type: DT_RESOURCE } output_arg { name: "mutex_lock" type: DT_VARIANT } -} -- | mutexV2 :: forall m' . (MonadBuild m') => m' (Tensor Value ResourceHandle) -- ^ __resource__ mutexV2 = mutexV2' id mutexV2' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Value ResourceHandle) -- ^ __resource__ mutexV2' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "MutexV2" & op'options & opInputs .~ op'inputs) {- output_arg { name: "resource" type: DT_RESOURCE } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | ncclAllReduce :: forall v'1 t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Data.Int.Int64 -- ^ __num_devices__ -> Tensor v'1 t -- ^ __input__ -> m' (Tensor Value t) -- ^ __data__ ncclAllReduce = ncclAllReduce' id ncclAllReduce' :: forall v'1 t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Data.Int.Int64 -- ^ __num_devices__ -> Tensor v'1 t -- ^ __input__ -> m' (Tensor Value t) -- ^ __data__ ncclAllReduce' op'options num_devices input | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "NcclAllReduce" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "num_devices" .~ num_devices & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "data" type_attr: "T" } attr { name: "reduction" type: "string" allowed_values { list { s: "min" s: "max" s: "prod" s: "sum" } } } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } attr { name: "num_devices" type: "int" } attr { name: "shared_name" type: "string" } -} -- | ncclBroadcast :: forall v'1 t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Shape -- ^ __shape__ -> Tensor v'1 t -- ^ __input__ -> m' (Tensor Value t) -- ^ __output__ ncclBroadcast = ncclBroadcast' id ncclBroadcast' :: forall v'1 t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Shape -- ^ __shape__ -> Tensor v'1 t -- ^ __input__ -> m' (Tensor Value t) -- ^ __output__ ncclBroadcast' op'options shape input | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "NcclBroadcast" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } attr { name: "shape" type: "shape" } -} -- | ncclReduce :: forall v'1 t m' . 
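-- NOTE: "NcclAllReduce" declares "reduction" (one of "min"/"max"/"prod"/"sum")
-- and "shared_name" without defaults, so both must be provided via
-- 'OpParams'; "num_devices" is an explicit argument. A sketch:
--
-- > allSum :: MonadBuild m => Tensor v'1 Float -> m (Tensor Value Float)
-- > allSum = ncclAllReduce' ( (opAttr "reduction"   .~ ("sum"   :: ByteString))
-- >                         . (opAttr "shared_name" .~ ("ring0" :: ByteString)) )
-- >                         2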
(MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => [Tensor v'1 t] -- ^ __input__ -> m' (Tensor Value t) -- ^ __data__ ncclReduce = ncclReduce' id ncclReduce' :: forall v'1 t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> [Tensor v'1 t] -- ^ __input__ -> m' (Tensor Value t) -- ^ __data__ ncclReduce' op'options input | eqLengthGuard [("num_devices", [("input", length input)])] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "NcclReduce" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "num_devices" .~ num_devices & op'options & opInputs .~ op'inputs) where num_devices = fromIntegral (length input) :: Int64 {- input_arg { name: "input" type_attr: "T" number_attr: "num_devices" } output_arg { name: "data" type_attr: "T" } attr { name: "reduction" type: "string" allowed_values { list { s: "min" s: "max" s: "prod" s: "sum" } } } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } attr { name: "num_devices" type: "int" has_minimum: true minimum: 1 } -} -- | nearestNeighbors :: Tensor v'1 Float -- ^ __points__ -> Tensor v'2 Float -- ^ __centers__ -> Tensor v'3 Data.Int.Int64 -- ^ __k__ -> (Tensor Build Data.Int.Int64, Tensor Build Float) -- ^ (__nearest_center_indices__, __nearest_center_distances__) -- -- * __nearest_center_indices__ -- -- * __nearest_center_distances__ nearestNeighbors = nearestNeighbors' id nearestNeighbors' :: OpParams -> Tensor v'1 Float -- ^ __points__ -> Tensor v'2 Float -- ^ __centers__ -> Tensor v'3 Data.Int.Int64 -- ^ __k__ -> (Tensor Build Data.Int.Int64, Tensor Build Float) -- ^ (__nearest_center_indices__, __nearest_center_distances__) -- -- * __nearest_center_indices__ -- -- * __nearest_center_distances__ nearestNeighbors' op'options points centers k | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs points, buildInputs centers, buildInputs k] return (opDef "NearestNeighbors" & op'options & opInputs .~ op'inputs) {- input_arg { name: "points" type: DT_FLOAT } input_arg { name: "centers" type: DT_FLOAT } input_arg { name: "k" type: DT_INT64 } output_arg { name: "nearest_center_indices" type: DT_INT64 } output_arg { name: "nearest_center_distances" type: DT_FLOAT } -} -- | neg :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ neg = neg' id neg' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ neg' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Neg" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | negTrain :: forall v'3 v'4 v'5 m' . 
(MonadBuild m') => Data.Int.Int64 -- ^ __num_negative_samples__ -> Tensor Ref Float -- ^ __w_in__ -> Tensor Ref Float -- ^ __w_out__ -> Tensor v'3 Data.Int.Int32 -- ^ __examples__ -> Tensor v'4 Data.Int.Int32 -- ^ __labels__ -> Tensor v'5 Float -- ^ __lr__ -> m' (ControlNode) negTrain = negTrain' id negTrain' :: forall v'3 v'4 v'5 m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_negative_samples__ -> Tensor Ref Float -- ^ __w_in__ -> Tensor Ref Float -- ^ __w_out__ -> Tensor v'3 Data.Int.Int32 -- ^ __examples__ -> Tensor v'4 Data.Int.Int32 -- ^ __labels__ -> Tensor v'5 Float -- ^ __lr__ -> m' (ControlNode) negTrain' op'options num_negative_samples w_in w_out examples labels lr | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs w_in, buildInputs w_out, buildInputs examples, buildInputs labels, buildInputs lr] buildOp [] (opDef "NegTrain" & opAttr "num_negative_samples" .~ num_negative_samples & op'options & opInputs .~ op'inputs) {- input_arg { name: "w_in" type: DT_FLOAT is_ref: true } input_arg { name: "w_out" type: DT_FLOAT is_ref: true } input_arg { name: "examples" type: DT_INT32 } input_arg { name: "labels" type: DT_INT32 } input_arg { name: "lr" type: DT_FLOAT } attr { name: "vocab_count" type: "list(int)" } attr { name: "num_negative_samples" type: "int" } -} -- | nextAfter :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __x1__ -> Tensor v'2 t -- ^ __x2__ -> Tensor Build t -- ^ __output__ nextAfter = nextAfter' id nextAfter' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x1__ -> Tensor v'2 t -- ^ __x2__ -> Tensor Build t -- ^ __output__ nextAfter' op'options x1 x2 | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x1, buildInputs x2] return (opDef "NextAfter" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x1" type_attr: "T" } input_arg { name: "x2" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_DOUBLE type: DT_FLOAT } } } -} -- | nextIteration :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __data__ -> Tensor Build t -- ^ __output__ nextIteration = nextIteration' id nextIteration' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor Build t -- ^ __output__ nextIteration' op'options data' | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data'] return (opDef "NextIteration" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } -} -- | noOp :: forall m' . (MonadBuild m') => m' (ControlNode) noOp = noOp' id noOp' :: forall m' . (MonadBuild m') => OpParams -> m' (ControlNode) noOp' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "NoOp" & op'options & opInputs .~ op'inputs) {- -} -- | nonDeterministicInts :: forall v'1 dtype shape_dtype m' . (MonadBuild m', TensorType dtype, TensorType shape_dtype) => Tensor v'1 shape_dtype -- ^ __shape__ -> m' (Tensor Value dtype) -- ^ __output__ nonDeterministicInts = nonDeterministicInts' id nonDeterministicInts' :: forall v'1 dtype shape_dtype m' . 
(MonadBuild m', TensorType dtype, TensorType shape_dtype) => OpParams -> Tensor v'1 shape_dtype -- ^ __shape__ -> m' (Tensor Value dtype) -- ^ __output__ nonDeterministicInts' op'options shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape] buildOp [] (opDef "NonDeterministicInts" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape_dtype" .~ tensorType (undefined :: shape_dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "shape" type_attr: "shape_dtype" } output_arg { name: "output" type_attr: "dtype" } attr { name: "dtype" type: "type" default_value { type: DT_INT64 } } attr { name: "shape_dtype" type: "type" default_value { type: DT_INT64 } } -} -- | nonMaxSuppression :: Tensor v'1 Float -- ^ __boxes__ -> Tensor v'2 Float -- ^ __scores__ -> Tensor v'3 Data.Int.Int32 -- ^ __max_output_size__ -> Tensor Build Data.Int.Int32 -- ^ __selected_indices__ nonMaxSuppression = nonMaxSuppression' id nonMaxSuppression' :: OpParams -> Tensor v'1 Float -- ^ __boxes__ -> Tensor v'2 Float -- ^ __scores__ -> Tensor v'3 Data.Int.Int32 -- ^ __max_output_size__ -> Tensor Build Data.Int.Int32 -- ^ __selected_indices__ nonMaxSuppression' op'options boxes scores max_output_size | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs boxes, buildInputs scores, buildInputs max_output_size] return (opDef "NonMaxSuppression" & op'options & opInputs .~ op'inputs) {- input_arg { name: "boxes" type: DT_FLOAT } input_arg { name: "scores" type: DT_FLOAT } input_arg { name: "max_output_size" type: DT_INT32 } output_arg { name: "selected_indices" type: DT_INT32 } attr { name: "iou_threshold" type: "float" default_value { f: 0.5 } } -} -- | nonMaxSuppressionV2 :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Word.Word16, Float] t) => Tensor v'1 t -- ^ __boxes__ -> Tensor v'2 t -- ^ __scores__ -> Tensor v'3 Data.Int.Int32 -- ^ __max_output_size__ -> Tensor v'4 Float -- ^ __iou_threshold__ -> Tensor Build Data.Int.Int32 -- ^ __selected_indices__ nonMaxSuppressionV2 = nonMaxSuppressionV2' id nonMaxSuppressionV2' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Word.Word16, Float] t) => OpParams -> Tensor v'1 t -- ^ __boxes__ -> Tensor v'2 t -- ^ __scores__ -> Tensor v'3 Data.Int.Int32 -- ^ __max_output_size__ -> Tensor v'4 Float -- ^ __iou_threshold__ -> Tensor Build Data.Int.Int32 -- ^ __selected_indices__ nonMaxSuppressionV2' op'options boxes scores max_output_size iou_threshold | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs boxes, buildInputs scores, buildInputs max_output_size, buildInputs iou_threshold] return (opDef "NonMaxSuppressionV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "boxes" type_attr: "T" } input_arg { name: "scores" type_attr: "T" } input_arg { name: "max_output_size" type: DT_INT32 } input_arg { name: "iou_threshold" type: DT_FLOAT } output_arg { name: "selected_indices" type: DT_INT32 } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_HALF type: DT_FLOAT } } } -} -- | nonMaxSuppressionV3 :: forall v'1 v'2 v'3 v'4 v'5 t . 
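{- Usage sketch for 'nonMaxSuppression' (illustrative, not generated code).
   The 'iou_threshold' attr defaults to 0.5 and is not a function argument,
   so it is overridden here through OpParams with 'opAttr'; this assumes
   TensorFlow.Core re-exports 'opAttr' and that lens-family's (.~) is in
   scope, as in this module's own imports.

   import Data.Int (Int32)
   import Lens.Family2 ((.~))

   nmsExample :: IO (V.Vector Int32)
   nmsExample = TF.runSession $ TF.run $
       CoreOps.nonMaxSuppression'
           (TF.opAttr "iou_threshold" .~ (0.6 :: Float))
           -- Two identical boxes in [y1, x1, y2, x2] form.
           (TF.constant (TF.Shape [2, 4]) [0, 0, 1, 1, 0, 0, 1, 1 :: Float])
           (TF.vector [0.9, 0.8 :: Float])   -- scores
           (TF.scalar (1 :: Int32))          -- max_output_size
   -- Only the higher-scoring box survives: [0]
-}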
(OneOf '[Data.Word.Word16, Float] t) => Tensor v'1 t -- ^ __boxes__ -> Tensor v'2 t -- ^ __scores__ -> Tensor v'3 Data.Int.Int32 -- ^ __max_output_size__ -> Tensor v'4 Float -- ^ __iou_threshold__ -> Tensor v'5 Float -- ^ __score_threshold__ -> Tensor Build Data.Int.Int32 -- ^ __selected_indices__ nonMaxSuppressionV3 = nonMaxSuppressionV3' id nonMaxSuppressionV3' :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[Data.Word.Word16, Float] t) => OpParams -> Tensor v'1 t -- ^ __boxes__ -> Tensor v'2 t -- ^ __scores__ -> Tensor v'3 Data.Int.Int32 -- ^ __max_output_size__ -> Tensor v'4 Float -- ^ __iou_threshold__ -> Tensor v'5 Float -- ^ __score_threshold__ -> Tensor Build Data.Int.Int32 -- ^ __selected_indices__ nonMaxSuppressionV3' op'options boxes scores max_output_size iou_threshold score_threshold | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs boxes, buildInputs scores, buildInputs max_output_size, buildInputs iou_threshold, buildInputs score_threshold] return (opDef "NonMaxSuppressionV3" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "boxes" type_attr: "T" } input_arg { name: "scores" type_attr: "T" } input_arg { name: "max_output_size" type: DT_INT32 } input_arg { name: "iou_threshold" type: DT_FLOAT } input_arg { name: "score_threshold" type: DT_FLOAT } output_arg { name: "selected_indices" type: DT_INT32 } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_HALF type: DT_FLOAT } } } -} -- | nonMaxSuppressionV4 :: forall v'1 v'2 v'3 v'4 v'5 t . (OneOf '[Data.Word.Word16, Float] t) => Tensor v'1 t -- ^ __boxes__ -> Tensor v'2 t -- ^ __scores__ -> Tensor v'3 Data.Int.Int32 -- ^ __max_output_size__ -> Tensor v'4 Float -- ^ __iou_threshold__ -> Tensor v'5 Float -- ^ __score_threshold__ -> (Tensor Build Data.Int.Int32, Tensor Build Data.Int.Int32) -- ^ (__selected_indices__, __valid_outputs__) -- -- * __selected_indices__ -- -- * __valid_outputs__ nonMaxSuppressionV4 = nonMaxSuppressionV4' id nonMaxSuppressionV4' :: forall v'1 v'2 v'3 v'4 v'5 t . 
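{- Usage sketch for 'nonMaxSuppressionV3' (illustrative, not generated
   code): unlike the V1 op above, the IoU and score thresholds are inputs
   rather than attrs, so they are passed as scalar tensors.

   nmsV3Example :: IO (V.Vector Int32)
   nmsV3Example = TF.runSession $ TF.run $
       CoreOps.nonMaxSuppressionV3
           (TF.constant (TF.Shape [2, 4]) [0, 0, 1, 1, 0, 0, 1, 1 :: Float])
           (TF.vector [0.9, 0.8 :: Float])
           (TF.scalar (2 :: Int32))       -- max_output_size
           (TF.scalar (0.5 :: Float))     -- iou_threshold
           (TF.scalar (0.0 :: Float))     -- score_threshold
-}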
(OneOf '[Data.Word.Word16, Float] t) => OpParams -> Tensor v'1 t -- ^ __boxes__ -> Tensor v'2 t -- ^ __scores__ -> Tensor v'3 Data.Int.Int32 -- ^ __max_output_size__ -> Tensor v'4 Float -- ^ __iou_threshold__ -> Tensor v'5 Float -- ^ __score_threshold__ -> (Tensor Build Data.Int.Int32, Tensor Build Data.Int.Int32) -- ^ (__selected_indices__, __valid_outputs__) -- -- * __selected_indices__ -- -- * __valid_outputs__ nonMaxSuppressionV4' op'options boxes scores max_output_size iou_threshold score_threshold | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs boxes, buildInputs scores, buildInputs max_output_size, buildInputs iou_threshold, buildInputs score_threshold] return (opDef "NonMaxSuppressionV4" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "boxes" type_attr: "T" } input_arg { name: "scores" type_attr: "T" } input_arg { name: "max_output_size" type: DT_INT32 } input_arg { name: "iou_threshold" type: DT_FLOAT } input_arg { name: "score_threshold" type: DT_FLOAT } output_arg { name: "selected_indices" type: DT_INT32 } output_arg { name: "valid_outputs" type: DT_INT32 } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_HALF type: DT_FLOAT } } } attr { name: "pad_to_max_output_size" type: "bool" default_value { b: false } } -} -- | nonMaxSuppressionWithOverlaps :: Tensor v'1 Float -- ^ __overlaps__ -> Tensor v'2 Float -- ^ __scores__ -> Tensor v'3 Data.Int.Int32 -- ^ __max_output_size__ -> Tensor v'4 Float -- ^ __overlap_threshold__ -> Tensor v'5 Float -- ^ __score_threshold__ -> Tensor Build Data.Int.Int32 -- ^ __selected_indices__ nonMaxSuppressionWithOverlaps = nonMaxSuppressionWithOverlaps' id nonMaxSuppressionWithOverlaps' :: OpParams -> Tensor v'1 Float -- ^ __overlaps__ -> Tensor v'2 Float -- ^ __scores__ -> Tensor v'3 Data.Int.Int32 -- ^ __max_output_size__ -> Tensor v'4 Float -- ^ __overlap_threshold__ -> Tensor v'5 Float -- ^ __score_threshold__ -> Tensor Build Data.Int.Int32 -- ^ __selected_indices__ nonMaxSuppressionWithOverlaps' op'options overlaps scores max_output_size overlap_threshold score_threshold | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs overlaps, buildInputs scores, buildInputs max_output_size, buildInputs overlap_threshold, buildInputs score_threshold] return (opDef "NonMaxSuppressionWithOverlaps" & op'options & opInputs .~ op'inputs) {- input_arg { name: "overlaps" type: DT_FLOAT } input_arg { name: "scores" type: DT_FLOAT } input_arg { name: "max_output_size" type: DT_INT32 } input_arg { name: "overlap_threshold" type: DT_FLOAT } input_arg { name: "score_threshold" type: DT_FLOAT } output_arg { name: "selected_indices" type: DT_INT32 } -} -- | notEqual :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ notEqual = notEqual' id notEqual' :: forall v'1 v'2 t . 
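{- Usage sketch for 'nonMaxSuppressionV4' (illustrative, not generated
   code): the op returns a pair of Build tensors, and 'run' can fetch a
   tuple directly (assuming the tuple Fetchable instance).

   nmsV4Example :: IO (V.Vector Int32, V.Vector Int32)
   nmsV4Example = TF.runSession $ do
       let (selected, validCount) =
               CoreOps.nonMaxSuppressionV4
                   (TF.constant (TF.Shape [2, 4]) [0, 0, 1, 1, 0, 0, 1, 1 :: Float])
                   (TF.vector [0.9, 0.8 :: Float])
                   (TF.scalar (2 :: Int32))
                   (TF.scalar (0.5 :: Float))
                   (TF.scalar (0.0 :: Float))
       TF.run (selected, validCount)
-}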
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build Bool -- ^ __z__ notEqual' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "NotEqual" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type: DT_BOOL } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_QUINT8 type: DT_QINT8 type: DT_QINT32 type: DT_STRING type: DT_BOOL type: DT_COMPLEX128 } } } -} -- | nthElement :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __n__ -> Tensor Build t -- ^ __values__ nthElement = nthElement' id nthElement' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __n__ -> Tensor Build t -- ^ __values__ nthElement' op'options input n | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs n] return (opDef "NthElement" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "n" type: DT_INT32 } output_arg { name: "values" type_attr: "T" } attr { name: "reverse" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | oneHot :: forall v'1 v'2 v'3 v'4 t tI . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word8] tI) => Tensor v'1 tI -- ^ __indices__ -> Tensor v'2 Data.Int.Int32 -- ^ __depth__ -> Tensor v'3 t -- ^ __on_value__ -> Tensor v'4 t -- ^ __off_value__ -> Tensor Build t -- ^ __output__ oneHot = oneHot' id oneHot' :: forall v'1 v'2 v'3 v'4 t tI . 
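{- Usage sketch for 'notEqual' (illustrative, not generated code): an
   element-wise comparison producing a Bool tensor.

   notEqualExample :: IO (V.Vector Bool)
   notEqualExample = TF.runSession $
       TF.run (CoreOps.notEqual (TF.vector [1, 2, 3 :: Int32])
                                (TF.vector [1, 0, 3 :: Int32]))
   -- Expected: [False, True, False]
-}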
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word8] tI) => OpParams -> Tensor v'1 tI -- ^ __indices__ -> Tensor v'2 Data.Int.Int32 -- ^ __depth__ -> Tensor v'3 t -- ^ __on_value__ -> Tensor v'4 t -- ^ __off_value__ -> Tensor Build t -- ^ __output__ oneHot' op'options indices depth on_value off_value | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices, buildInputs depth, buildInputs on_value, buildInputs off_value] return (opDef "OneHot" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "TI" .~ tensorType (undefined :: tI) & op'options & opInputs .~ op'inputs) {- input_arg { name: "indices" type_attr: "TI" } input_arg { name: "depth" type: DT_INT32 } input_arg { name: "on_value" type_attr: "T" } input_arg { name: "off_value" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "axis" type: "int" default_value { i: -1 } } attr { name: "T" type: "type" } attr { name: "TI" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_UINT8 type: DT_INT32 type: DT_INT64 } } } -} -- | onesLike :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ onesLike = onesLike' id onesLike' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ onesLike' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "OnesLike" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT8 type: DT_UINT8 type: DT_INT16 type: DT_UINT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_BOOL } } } -} -- | optimizeDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __optimizations__ -> Tensor Build Variant -- ^ __handle__ optimizeDataset = optimizeDataset' id optimizeDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __optimizations__ -> Tensor Build Variant -- ^ __handle__ optimizeDataset' op'options output_types input_dataset optimizations | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs optimizations] return (opDef "OptimizeDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "optimizations" type: DT_STRING } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } attr { name: "optimization_configs" type: "list(string)" default_value { list { } } } -} -- | optionalFromValue :: forall v'1 toutput_types . 
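{- Usage sketch for 'oneHot' (illustrative, not generated code). The
   'axis' attr defaults to -1 (innermost); like any optional attr it can
   be overridden through OpParams with the primed variant.

   import Data.Int (Int32, Int64)

   oneHotExample :: IO (V.Vector Float)
   oneHotExample = TF.runSession $ TF.run $
       CoreOps.oneHot' (TF.opAttr "axis" .~ (0 :: Int64))
           (TF.vector [0, 2 :: Int32])   -- indices
           (TF.scalar (3 :: Int32))      -- depth
           (TF.scalar (1 :: Float))      -- on_value
           (TF.scalar (0 :: Float))      -- off_value
-}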
(TensorTypes toutput_types) => TensorList (v'1) toutput_types -- ^ __components__ -> Tensor Build Variant -- ^ __optional__ optionalFromValue = optionalFromValue' id optionalFromValue' :: forall v'1 toutput_types . (TensorTypes toutput_types) => OpParams -> TensorList (v'1) toutput_types -- ^ __components__ -> Tensor Build Variant -- ^ __optional__ optionalFromValue' op'options components | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs components] return (opDef "OptionalFromValue" & opAttr "Toutput_types" .~ fromTensorTypes (Proxy :: Proxy toutput_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "components" type_list_attr: "Toutput_types" } output_arg { name: "optional" type: DT_VARIANT } attr { name: "Toutput_types" type: "list(type)" has_minimum: true minimum: 1 } -} -- | optionalGetValue :: forall v'1 output_types . (TensorTypes output_types) => Tensor v'1 Variant -- ^ __optional__ -> TensorList (Build) output_types -- ^ __components__ optionalGetValue = optionalGetValue' id optionalGetValue' :: forall v'1 output_types . (TensorTypes output_types) => OpParams -> Tensor v'1 Variant -- ^ __optional__ -> TensorList (Build) output_types -- ^ __components__ optionalGetValue' op'options optional | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs optional] return (opDef "OptionalGetValue" & opAttr "output_types" .~ fromTensorTypes (Proxy :: Proxy output_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "optional" type: DT_VARIANT } output_arg { name: "components" type_list_attr: "output_types" } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | optionalHasValue :: Tensor v'1 Variant -- ^ __optional__ -> Tensor Build Bool -- ^ __has_value__ optionalHasValue = optionalHasValue' id optionalHasValue' :: OpParams -> Tensor v'1 Variant -- ^ __optional__ -> Tensor Build Bool -- ^ __has_value__ optionalHasValue' op'options optional | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs optional] return (opDef "OptionalHasValue" & op'options & opInputs .~ op'inputs) {- input_arg { name: "optional" type: DT_VARIANT } output_arg { name: "has_value" type: DT_BOOL } -} -- | optionalNone :: Tensor Build Variant -- ^ __optional__ optionalNone = optionalNone' id optionalNone' :: OpParams -> Tensor Build Variant -- ^ __optional__ optionalNone' op'options | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] return (opDef "OptionalNone" & op'options & opInputs .~ op'inputs) {- output_arg { name: "optional" type: DT_VARIANT } -} -- | orderedMapClear :: forall m' . (MonadBuild m') => [DataType] -- ^ __dtypes__ -> m' (ControlNode) orderedMapClear = orderedMapClear' id orderedMapClear' :: forall m' . 
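{- Usage sketch for the Optional ops (illustrative, not generated code):
   'optionalNone' builds an empty variant and 'optionalHasValue' tests it;
   a non-empty optional can be built with 'optionalFromValue' from a
   TensorList (constructors ':/' and 'Nil' from TensorFlow.Types).

   hasValueExample :: IO (V.Vector Bool)
   hasValueExample = TF.runSession $
       TF.run (CoreOps.optionalHasValue CoreOps.optionalNone)
   -- A scalar False, fetched as a one-element vector.
-}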
(MonadBuild m') => OpParams -> [DataType] -- ^ __dtypes__ -> m' (ControlNode) orderedMapClear' op'options dtypes | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "OrderedMapClear" & opAttr "dtypes" .~ dtypes & op'options & opInputs .~ op'inputs) {- attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | orderedMapIncompleteSize :: forall m' . (MonadBuild m') => [DataType] -- ^ __dtypes__ -> m' (Tensor Value Data.Int.Int32) -- ^ __size__ orderedMapIncompleteSize = orderedMapIncompleteSize' id orderedMapIncompleteSize' :: forall m' . (MonadBuild m') => OpParams -> [DataType] -- ^ __dtypes__ -> m' (Tensor Value Data.Int.Int32) -- ^ __size__ orderedMapIncompleteSize' op'options dtypes | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "OrderedMapIncompleteSize" & opAttr "dtypes" .~ dtypes & op'options & opInputs .~ op'inputs) {- output_arg { name: "size" type: DT_INT32 } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | orderedMapPeek :: forall v'1 v'2 dtypes m' . (MonadBuild m', TensorTypes dtypes) => Tensor v'1 Data.Int.Int64 -- ^ __key__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> m' (TensorList (Value) dtypes) -- ^ __values__ orderedMapPeek = orderedMapPeek' id orderedMapPeek' :: forall v'1 v'2 dtypes m' . (MonadBuild m', TensorTypes dtypes) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __key__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> m' (TensorList (Value) dtypes) -- ^ __values__ orderedMapPeek' op'options key indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs key, buildInputs indices] buildOp [] (opDef "OrderedMapPeek" & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes) & op'options & opInputs .~ op'inputs) {- input_arg { name: "key" type: DT_INT64 } input_arg { name: "indices" type: DT_INT32 } output_arg { name: "values" type_list_attr: "dtypes" } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | orderedMapSize :: forall m' . (MonadBuild m') => [DataType] -- ^ __dtypes__ -> m' (Tensor Value Data.Int.Int32) -- ^ __size__ orderedMapSize = orderedMapSize' id orderedMapSize' :: forall m' . 
(MonadBuild m') => OpParams -> [DataType] -- ^ __dtypes__ -> m' (Tensor Value Data.Int.Int32) -- ^ __size__ orderedMapSize' op'options dtypes | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "OrderedMapSize" & opAttr "dtypes" .~ dtypes & op'options & opInputs .~ op'inputs) {- output_arg { name: "size" type: DT_INT32 } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | orderedMapStage :: forall v'1 v'2 v'3 fake_dtypes m' . (MonadBuild m', TensorTypes fake_dtypes) => [DataType] -- ^ __dtypes__ -> Tensor v'1 Data.Int.Int64 -- ^ __key__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> TensorList (v'3) fake_dtypes -- ^ __values__ -> m' (ControlNode) orderedMapStage = orderedMapStage' id orderedMapStage' :: forall v'1 v'2 v'3 fake_dtypes m' . (MonadBuild m', TensorTypes fake_dtypes) => OpParams -> [DataType] -- ^ __dtypes__ -> Tensor v'1 Data.Int.Int64 -- ^ __key__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> TensorList (v'3) fake_dtypes -- ^ __values__ -> m' (ControlNode) orderedMapStage' op'options dtypes key indices values | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs key, buildInputs indices, buildInputs values] buildOp [] (opDef "OrderedMapStage" & opAttr "fake_dtypes" .~ fromTensorTypes (Proxy :: Proxy fake_dtypes) & opAttr "dtypes" .~ dtypes & op'options & opInputs .~ op'inputs) {- input_arg { name: "key" type: DT_INT64 } input_arg { name: "indices" type: DT_INT32 } input_arg { name: "values" type_list_attr: "fake_dtypes" } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" } attr { name: "fake_dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | orderedMapUnstage :: forall v'1 v'2 dtypes m' . (MonadBuild m', TensorTypes dtypes) => Tensor v'1 Data.Int.Int64 -- ^ __key__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> m' (TensorList (Value) dtypes) -- ^ __values__ orderedMapUnstage = orderedMapUnstage' id orderedMapUnstage' :: forall v'1 v'2 dtypes m' . 
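{- Usage sketch for 'orderedMapSize' (illustrative, not generated code).
   The 'dtypes' argument describes the map's value types; DT_FLOAT is
   assumed to come from the tensorflow-proto package
   (Proto.Tensorflow.Core.Framework.Types).

   import Proto.Tensorflow.Core.Framework.Types (DataType(DT_FLOAT))

   mapSizeExample :: IO (V.Vector Int32)
   mapSizeExample = TF.runSession $ do
       size <- CoreOps.orderedMapSize [DT_FLOAT]
       TF.run size
   -- A freshly created map, so the size is 0.
-}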
(MonadBuild m', TensorTypes dtypes) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __key__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> m' (TensorList (Value) dtypes) -- ^ __values__ orderedMapUnstage' op'options key indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs key, buildInputs indices] buildOp [] (opDef "OrderedMapUnstage" & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes) & op'options & opInputs .~ op'inputs) {- input_arg { name: "key" type: DT_INT64 } input_arg { name: "indices" type: DT_INT32 } output_arg { name: "values" type_list_attr: "dtypes" } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | orderedMapUnstageNoKey :: forall v'1 dtypes m' . (MonadBuild m', TensorTypes dtypes) => Tensor v'1 Data.Int.Int32 -- ^ __indices__ -> m' ((Tensor Value Data.Int.Int64, TensorList (Value) dtypes)) -- ^ (__key__, __values__) -- -- * __key__ -- -- * __values__ orderedMapUnstageNoKey = orderedMapUnstageNoKey' id orderedMapUnstageNoKey' :: forall v'1 dtypes m' . (MonadBuild m', TensorTypes dtypes) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __indices__ -> m' ((Tensor Value Data.Int.Int64, TensorList (Value) dtypes)) -- ^ (__key__, __values__) -- -- * __key__ -- -- * __values__ orderedMapUnstageNoKey' op'options indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices] buildOp [] (opDef "OrderedMapUnstageNoKey" & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes) & op'options & opInputs .~ op'inputs) {- input_arg { name: "indices" type: DT_INT32 } output_arg { name: "key" type: DT_INT64 } output_arg { name: "values" type_list_attr: "dtypes" } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | outfeedDequeue :: forall dtype m' . (MonadBuild m', TensorType dtype) => Shape -- ^ __shape__ -> m' (Tensor Value dtype) -- ^ __output__ outfeedDequeue = outfeedDequeue' id outfeedDequeue' :: forall dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Shape -- ^ __shape__ -> m' (Tensor Value dtype) -- ^ __output__ outfeedDequeue' op'options shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "OutfeedDequeue" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- output_arg { name: "output" type_attr: "dtype" } attr { name: "dtype" type: "type" } attr { name: "shape" type: "shape" } attr { name: "device_ordinal" type: "int" default_value { i: -1 } } -} -- | outfeedDequeueTuple :: forall dtypes m' . (MonadBuild m', TensorTypes dtypes) => m' (TensorList (Value) dtypes) -- ^ __outputs__ outfeedDequeueTuple = outfeedDequeueTuple' id outfeedDequeueTuple' :: forall dtypes m' . 
(MonadBuild m', TensorTypes dtypes) => OpParams -> m' (TensorList (Value) dtypes) -- ^ __outputs__ outfeedDequeueTuple' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "OutfeedDequeueTuple" & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes) & op'options & opInputs .~ op'inputs) {- output_arg { name: "outputs" type_list_attr: "dtypes" } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "shapes" type: "list(shape)" } attr { name: "device_ordinal" type: "int" default_value { i: -1 } } -} -- | outfeedEnqueue :: forall v'1 dtype m' . (MonadBuild m', TensorType dtype) => Tensor v'1 dtype -- ^ __input__ -> m' (ControlNode) outfeedEnqueue = outfeedEnqueue' id outfeedEnqueue' :: forall v'1 dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 dtype -- ^ __input__ -> m' (ControlNode) outfeedEnqueue' op'options input | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "OutfeedEnqueue" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "dtype" } attr { name: "dtype" type: "type" } -} -- | outfeedEnqueueTuple :: forall v'1 dtypes m' . (MonadBuild m', TensorTypes dtypes) => TensorList (v'1) dtypes -- ^ __inputs__ -> m' (ControlNode) outfeedEnqueueTuple = outfeedEnqueueTuple' id outfeedEnqueueTuple' :: forall v'1 dtypes m' . (MonadBuild m', TensorTypes dtypes) => OpParams -> TensorList (v'1) dtypes -- ^ __inputs__ -> m' (ControlNode) outfeedEnqueueTuple' op'options inputs | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs] buildOp [] (opDef "OutfeedEnqueueTuple" & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes) & op'options & opInputs .~ op'inputs) {- input_arg { name: "inputs" type_list_attr: "dtypes" } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } -} -- | pack :: forall v'1 t . (TensorType t) => [Tensor v'1 t] -- ^ __values__ -> Tensor Build t -- ^ __output__ pack = pack' id pack' :: forall v'1 t . (TensorType t) => OpParams -> [Tensor v'1 t] -- ^ __values__ -> Tensor Build t -- ^ __output__ pack' op'options values | eqLengthGuard [("N", [("values", length values)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs values] return (opDef "Pack" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length values) :: Int64 {- input_arg { name: "values" type_attr: "T" number_attr: "N" } output_arg { name: "output" type_attr: "T" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } attr { name: "axis" type: "int" default_value { i: 0 } } -} -- | pad :: forall v'1 v'2 t tpaddings . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tpaddings) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 tpaddings -- ^ __paddings__ -> Tensor Build t -- ^ __output__ pad = pad' id pad' :: forall v'1 v'2 t tpaddings . 
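{- Usage sketch for 'pack' (illustrative, not generated code): stacks N
   tensors along a new dimension (attr 'axis', default 0); the wrapper
   derives the N attr from the list length.

   packExample :: IO (V.Vector Float)
   packExample = TF.runSession $
       TF.run (CoreOps.pack [ TF.vector [1, 2 :: Float]
                            , TF.vector [3, 4 :: Float] ])
   -- A 2x2 tensor fetched row-major: [1, 2, 3, 4]
-}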
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tpaddings) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tpaddings -- ^ __paddings__ -> Tensor Build t -- ^ __output__ pad' op'options input paddings | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs paddings] return (opDef "Pad" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "paddings" type_attr: "Tpaddings" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tpaddings" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | padV2 :: forall v'1 v'2 v'3 t tpaddings . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tpaddings) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 tpaddings -- ^ __paddings__ -> Tensor v'3 t -- ^ __constant_values__ -> Tensor Build t -- ^ __output__ padV2 = padV2' id padV2' :: forall v'1 v'2 v'3 t tpaddings . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tpaddings) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tpaddings -- ^ __paddings__ -> Tensor v'3 t -- ^ __constant_values__ -> Tensor Build t -- ^ __output__ padV2' op'options input paddings constant_values | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs paddings, buildInputs constant_values] return (opDef "PadV2" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "paddings" type_attr: "Tpaddings" } input_arg { name: "constant_values" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tpaddings" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | paddedBatchDataset :: forall v'1 v'2 v'3 v'4 toutput_types . (TensorTypes toutput_types) => Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __batch_size__ -> [Tensor v'3 Data.Int.Int64] -- ^ __padded_shapes__ -> TensorList (v'4) toutput_types -- ^ __padding_values__ -> Tensor Build Variant -- ^ __handle__ paddedBatchDataset = paddedBatchDataset' id paddedBatchDataset' :: forall v'1 v'2 v'3 v'4 toutput_types . 
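{- Usage sketch for 'padV2' (illustrative, not generated code): 'paddings'
   is an [n, 2] tensor of (before, after) counts per dimension, and
   'constant_values' supplies the fill element.

   padExample :: IO (V.Vector Float)
   padExample = TF.runSession $ TF.run $
       CoreOps.padV2
           (TF.vector [1, 2 :: Float])
           (TF.constant (TF.Shape [1, 2]) [1, 2 :: Int32])
           (TF.scalar (9 :: Float))
   -- Expected: [9, 1, 2, 9, 9]
-}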
(TensorTypes toutput_types) => OpParams -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __batch_size__ -> [Tensor v'3 Data.Int.Int64] -- ^ __padded_shapes__ -> TensorList (v'4) toutput_types -- ^ __padding_values__ -> Tensor Build Variant -- ^ __handle__ paddedBatchDataset' op'options input_dataset batch_size padded_shapes padding_values | eqLengthGuard [("N", [("padded_shapes", length padded_shapes)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs batch_size, buildInputs padded_shapes, buildInputs padding_values] return (opDef "PaddedBatchDataset" & opAttr "Toutput_types" .~ fromTensorTypes (Proxy :: Proxy toutput_types) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length padded_shapes) :: Int64 {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "batch_size" type: DT_INT64 } input_arg { name: "padded_shapes" type: DT_INT64 number_attr: "N" } input_arg { name: "padding_values" type_list_attr: "Toutput_types" } output_arg { name: "handle" type: DT_VARIANT } attr { name: "Toutput_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } attr { name: "N" type: "int" has_minimum: true minimum: 1 } -} -- | paddedBatchDatasetV2 :: forall v'1 v'2 v'3 v'4 v'5 toutput_types . (TensorTypes toutput_types) => Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __batch_size__ -> [Tensor v'3 Data.Int.Int64] -- ^ __padded_shapes__ -> TensorList (v'4) toutput_types -- ^ __padding_values__ -> Tensor v'5 Bool -- ^ __drop_remainder__ -> Tensor Build Variant -- ^ __handle__ paddedBatchDatasetV2 = paddedBatchDatasetV2' id paddedBatchDatasetV2' :: forall v'1 v'2 v'3 v'4 v'5 toutput_types . (TensorTypes toutput_types) => OpParams -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __batch_size__ -> [Tensor v'3 Data.Int.Int64] -- ^ __padded_shapes__ -> TensorList (v'4) toutput_types -- ^ __padding_values__ -> Tensor v'5 Bool -- ^ __drop_remainder__ -> Tensor Build Variant -- ^ __handle__ paddedBatchDatasetV2' op'options input_dataset batch_size padded_shapes padding_values drop_remainder | eqLengthGuard [("N", [("padded_shapes", length padded_shapes)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs batch_size, buildInputs padded_shapes, buildInputs padding_values, buildInputs drop_remainder] return (opDef "PaddedBatchDatasetV2" & opAttr "Toutput_types" .~ fromTensorTypes (Proxy :: Proxy toutput_types) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length padded_shapes) :: Int64 {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "batch_size" type: DT_INT64 } input_arg { name: "padded_shapes" type: DT_INT64 number_attr: "N" } input_arg { name: "padding_values" type_list_attr: "Toutput_types" } input_arg { name: "drop_remainder" type: DT_BOOL } output_arg { name: "handle" type: DT_VARIANT } attr { name: "parallel_copy" type: "bool" default_value { b: false } } attr { name: "Toutput_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } attr { name: "N" type: "int" has_minimum: true minimum: 1 } -} -- | paddingFIFOQueue :: forall m' . 
(MonadBuild m') => [DataType] -- ^ __component_types__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__ paddingFIFOQueue = paddingFIFOQueue' id paddingFIFOQueue' :: forall m' . (MonadBuild m') => OpParams -> [DataType] -- ^ __component_types__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__ paddingFIFOQueue' op'options component_types | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "PaddingFIFOQueue" & opAttr "component_types" .~ component_types & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" type: DT_STRING is_ref: true } attr { name: "component_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "shapes" type: "list(shape)" default_value { list { } } has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | paddingFIFOQueueV2 :: forall m' . (MonadBuild m') => [DataType] -- ^ __component_types__ -> m' (Tensor Value ResourceHandle) -- ^ __handle__ paddingFIFOQueueV2 = paddingFIFOQueueV2' id paddingFIFOQueueV2' :: forall m' . (MonadBuild m') => OpParams -> [DataType] -- ^ __component_types__ -> m' (Tensor Value ResourceHandle) -- ^ __handle__ paddingFIFOQueueV2' op'options component_types | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "PaddingFIFOQueueV2" & opAttr "component_types" .~ component_types & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" type: DT_RESOURCE } attr { name: "component_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "shapes" type: "list(shape)" default_value { list { } } has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | parallelConcat :: forall v'1 t . (TensorType t) => Shape -- ^ __shape__ -> [Tensor v'1 t] -- ^ __values__ -> Tensor Build t -- ^ __output__ parallelConcat = parallelConcat' id parallelConcat' :: forall v'1 t . (TensorType t) => OpParams -> Shape -- ^ __shape__ -> [Tensor v'1 t] -- ^ __values__ -> Tensor Build t -- ^ __output__ parallelConcat' op'options shape values | eqLengthGuard [("N", [("values", length values)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs values] return (opDef "ParallelConcat" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "shape" .~ shape & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length values) :: Int64 {- input_arg { name: "values" type_attr: "T" number_attr: "N" } output_arg { name: "output" type_attr: "T" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } attr { name: "shape" type: "shape" } -} -- | parallelDynamicStitch :: forall v'1 v'2 t . (TensorType t) => [Tensor v'1 Data.Int.Int32] -- ^ __indices__ -> [Tensor v'2 t] -- ^ __data__ -> Tensor Build t -- ^ __merged__ parallelDynamicStitch = parallelDynamicStitch' id parallelDynamicStitch' :: forall v'1 v'2 t . 
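{- Usage sketch for 'parallelConcat' (illustrative, not generated code):
   each input must carry the full output shape with a leading dimension
   of 1, and the overall output shape is passed explicitly.

   parallelConcatExample :: IO (V.Vector Float)
   parallelConcatExample = TF.runSession $ TF.run $
       CoreOps.parallelConcat (TF.Shape [2])
           [ TF.constant (TF.Shape [1]) [10 :: Float]
           , TF.constant (TF.Shape [1]) [20 :: Float] ]
   -- Expected: [10, 20]
-}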
(TensorType t) => OpParams -> [Tensor v'1 Data.Int.Int32] -- ^ __indices__ -> [Tensor v'2 t] -- ^ __data__ -> Tensor Build t -- ^ __merged__ parallelDynamicStitch' op'options indices data' | eqLengthGuard [("N", [("indices", length indices), ("data", length data')])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices, buildInputs data'] return (opDef "ParallelDynamicStitch" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length indices) :: Int64 {- input_arg { name: "indices" type: DT_INT32 number_attr: "N" } input_arg { name: "data" type_attr: "T" number_attr: "N" } output_arg { name: "merged" type_attr: "T" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } -} -- | parameterizedTruncatedNormal :: forall v'1 v'2 v'3 v'4 v'5 dtype t m' . (MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Tensor v'1 t -- ^ __shape__ -> Tensor v'2 dtype -- ^ __means__ -> Tensor v'3 dtype -- ^ __stdevs__ -> Tensor v'4 dtype -- ^ __minvals__ -> Tensor v'5 dtype -- ^ __maxvals__ -> m' (Tensor Value dtype) -- ^ __output__ parameterizedTruncatedNormal = parameterizedTruncatedNormal' id parameterizedTruncatedNormal' :: forall v'1 v'2 v'3 v'4 v'5 dtype t m' . (MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Tensor v'1 t -- ^ __shape__ -> Tensor v'2 dtype -- ^ __means__ -> Tensor v'3 dtype -- ^ __stdevs__ -> Tensor v'4 dtype -- ^ __minvals__ -> Tensor v'5 dtype -- ^ __maxvals__ -> m' (Tensor Value dtype) -- ^ __output__ parameterizedTruncatedNormal' op'options shape means stdevs minvals maxvals | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape, buildInputs means, buildInputs stdevs, buildInputs minvals, buildInputs maxvals] buildOp [] (opDef "ParameterizedTruncatedNormal" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "shape" type_attr: "T" } input_arg { name: "means" type_attr: "dtype" } input_arg { name: "stdevs" type_attr: "dtype" } input_arg { name: "minvals" type_attr: "dtype" } input_arg { name: "maxvals" type_attr: "dtype" } output_arg { name: "output" type_attr: "dtype" } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } attr { name: "dtype" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | parseExample :: forall v'1 v'2 v'3 v'4 v'5 sparse_types tdense . 
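{- Usage sketch for 'parallelDynamicStitch' (illustrative, not generated
   code): interleaves the data tensors into one output according to the
   index tensors; the guard requires the two lists to have equal length.

   stitchExample :: IO (V.Vector Float)
   stitchExample = TF.runSession $ TF.run $
       CoreOps.parallelDynamicStitch
           [ TF.vector [0, 2 :: Int32], TF.vector [1 :: Int32] ]
           [ TF.vector [10, 30 :: Float], TF.vector [20 :: Float] ]
   -- Merged by index: [10, 20, 30]
-}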
(OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] sparse_types, OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] tdense) => Tensor v'1 Data.ByteString.ByteString -- ^ __serialized__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __names__ -> [Tensor v'3 Data.ByteString.ByteString] -- ^ __sparse_keys__ -> [Tensor v'4 Data.ByteString.ByteString] -- ^ __dense_keys__ -> TensorList (v'5) tdense -- ^ __dense_defaults__ -> ([Tensor Build Data.Int.Int64], TensorList (Build) sparse_types, [Tensor Build Data.Int.Int64], TensorList (Build) tdense) -- ^ (__sparse_indices__, __sparse_values__, __sparse_shapes__, __dense_values__) -- -- * __sparse_indices__ -- -- * __sparse_values__ -- -- * __sparse_shapes__ -- -- * __dense_values__ parseExample = parseExample' id parseExample' :: forall v'1 v'2 v'3 v'4 v'5 sparse_types tdense . (OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] sparse_types, OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] tdense) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __serialized__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __names__ -> [Tensor v'3 Data.ByteString.ByteString] -- ^ __sparse_keys__ -> [Tensor v'4 Data.ByteString.ByteString] -- ^ __dense_keys__ -> TensorList (v'5) tdense -- ^ __dense_defaults__ -> ([Tensor Build Data.Int.Int64], TensorList (Build) sparse_types, [Tensor Build Data.Int.Int64], TensorList (Build) tdense) -- ^ (__sparse_indices__, __sparse_values__, __sparse_shapes__, __dense_values__) -- -- * __sparse_indices__ -- -- * __sparse_values__ -- -- * __sparse_shapes__ -- -- * __dense_values__ parseExample' op'options serialized names sparse_keys dense_keys dense_defaults | eqLengthGuard [("Nsparse", [("sparse_keys", length sparse_keys)]), ("Ndense", [("dense_keys", length dense_keys)])] = pureOp [nsparse, nsparse] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs serialized, buildInputs names, buildInputs sparse_keys, buildInputs dense_keys, buildInputs dense_defaults] return (opDef "ParseExample" & opAttr "sparse_types" .~ fromTensorTypes (Proxy :: Proxy sparse_types) & opAttr "Tdense" .~ fromTensorTypes (Proxy :: Proxy tdense) & opAttr "Nsparse" .~ nsparse & opAttr "Ndense" .~ ndense & op'options & opInputs .~ op'inputs) where nsparse = fromIntegral (length sparse_keys) :: Int64 ndense = fromIntegral (length dense_keys) :: Int64 {- input_arg { name: "serialized" type: DT_STRING } input_arg { name: "names" type: DT_STRING } input_arg { name: "sparse_keys" type: DT_STRING number_attr: "Nsparse" } input_arg { name: "dense_keys" type: DT_STRING number_attr: "Ndense" } input_arg { name: "dense_defaults" type_list_attr: "Tdense" } output_arg { name: "sparse_indices" type: DT_INT64 number_attr: "Nsparse" } output_arg { name: "sparse_values" type_list_attr: "sparse_types" } output_arg { name: "sparse_shapes" type: DT_INT64 number_attr: "Nsparse" } output_arg { name: "dense_values" type_list_attr: "Tdense" } attr { name: "Nsparse" type: "int" has_minimum: true } attr { name: "Ndense" type: "int" has_minimum: true } attr { name: "sparse_types" type: "list(type)" has_minimum: true allowed_values { list { type: DT_FLOAT type: DT_INT64 type: DT_STRING } } } attr { name: "Tdense" type: "list(type)" has_minimum: true allowed_values { list { type: DT_FLOAT type: DT_INT64 type: DT_STRING } } } attr { name: "dense_shapes" type: "list(shape)" has_minimum: true } -} -- | parseSingleExample :: forall v'1 v'2 sparse_types tdense . 
(OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] sparse_types, OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] tdense) => Data.Int.Int64 -- ^ __num_sparse__ -> Tensor v'1 Data.ByteString.ByteString -- ^ __serialized__ -> TensorList (v'2) tdense -- ^ __dense_defaults__ -> ([Tensor Build Data.Int.Int64], TensorList (Build) sparse_types, [Tensor Build Data.Int.Int64], TensorList (Build) tdense) -- ^ (__sparse_indices__, __sparse_values__, __sparse_shapes__, __dense_values__) -- -- * __sparse_indices__ -- -- * __sparse_values__ -- -- * __sparse_shapes__ -- -- * __dense_values__ parseSingleExample = parseSingleExample' id parseSingleExample' :: forall v'1 v'2 sparse_types tdense . (OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] sparse_types, OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] tdense) => OpParams -> Data.Int.Int64 -- ^ __num_sparse__ -> Tensor v'1 Data.ByteString.ByteString -- ^ __serialized__ -> TensorList (v'2) tdense -- ^ __dense_defaults__ -> ([Tensor Build Data.Int.Int64], TensorList (Build) sparse_types, [Tensor Build Data.Int.Int64], TensorList (Build) tdense) -- ^ (__sparse_indices__, __sparse_values__, __sparse_shapes__, __dense_values__) -- -- * __sparse_indices__ -- -- * __sparse_values__ -- -- * __sparse_shapes__ -- -- * __dense_values__ parseSingleExample' op'options num_sparse serialized dense_defaults | eqLengthGuard [] = pureOp [num_sparse, num_sparse] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs serialized, buildInputs dense_defaults] return (opDef "ParseSingleExample" & opAttr "sparse_types" .~ fromTensorTypes (Proxy :: Proxy sparse_types) & opAttr "Tdense" .~ fromTensorTypes (Proxy :: Proxy tdense) & opAttr "num_sparse" .~ num_sparse & op'options & opInputs .~ op'inputs) {- input_arg { name: "serialized" type: DT_STRING } input_arg { name: "dense_defaults" type_list_attr: "Tdense" } output_arg { name: "sparse_indices" type: DT_INT64 number_attr: "num_sparse" } output_arg { name: "sparse_values" type_list_attr: "sparse_types" } output_arg { name: "sparse_shapes" type: DT_INT64 number_attr: "num_sparse" } output_arg { name: "dense_values" type_list_attr: "Tdense" } attr { name: "num_sparse" type: "int" has_minimum: true } attr { name: "sparse_keys" type: "list(string)" has_minimum: true } attr { name: "dense_keys" type: "list(string)" has_minimum: true } attr { name: "sparse_types" type: "list(type)" has_minimum: true allowed_values { list { type: DT_FLOAT type: DT_INT64 type: DT_STRING } } } attr { name: "Tdense" type: "list(type)" has_minimum: true allowed_values { list { type: DT_FLOAT type: DT_INT64 type: DT_STRING } } } attr { name: "dense_shapes" type: "list(shape)" has_minimum: true } -} -- | parseSingleSequenceExample :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 context_sparse_types tcontext_dense feature_list_dense_types feature_list_sparse_types . 
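{- Note on the parse* wrappers (illustrative, not generated code): the
   required attrs that the wrappers do not surface as arguments (here
   'sparse_keys', 'dense_keys', and 'dense_shapes') must be supplied
   through OpParams, e.g.

       CoreOps.parseSingleExample'
           ( (TF.opAttr "sparse_keys" .~ (["age"] :: [ByteString]))
           . (TF.opAttr "dense_keys"  .~ ([]      :: [ByteString])) )
           ...

   assuming an Attribute instance for [ByteString] (attr type
   list(string)); the key names are hypothetical.
-}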
(OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] context_sparse_types, OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] tcontext_dense, OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] feature_list_dense_types, OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] feature_list_sparse_types) => Tensor v'1 Data.ByteString.ByteString -- ^ __serialized__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __feature_list_dense_missing_assumed_empty__ -> [Tensor v'3 Data.ByteString.ByteString] -- ^ __context_sparse_keys__ -> [Tensor v'4 Data.ByteString.ByteString] -- ^ __context_dense_keys__ -> [Tensor v'5 Data.ByteString.ByteString] -- ^ __feature_list_sparse_keys__ -> [Tensor v'6 Data.ByteString.ByteString] -- ^ __feature_list_dense_keys__ -> TensorList (v'7) tcontext_dense -- ^ __context_dense_defaults__ -> Tensor v'8 Data.ByteString.ByteString -- ^ __debug_name__ -> ([Tensor Build Data.Int.Int64], TensorList (Build) context_sparse_types, [Tensor Build Data.Int.Int64], TensorList (Build) tcontext_dense, [Tensor Build Data.Int.Int64], TensorList (Build) feature_list_sparse_types, [Tensor Build Data.Int.Int64], TensorList (Build) feature_list_dense_types) -- ^ (__context_sparse_indices__, __context_sparse_values__, __context_sparse_shapes__, __context_dense_values__, __feature_list_sparse_indices__, __feature_list_sparse_values__, __feature_list_sparse_shapes__, __feature_list_dense_values__) -- -- * __context_sparse_indices__ -- -- * __context_sparse_values__ -- -- * __context_sparse_shapes__ -- -- * __context_dense_values__ -- -- * __feature_list_sparse_indices__ -- -- * __feature_list_sparse_values__ -- -- * __feature_list_sparse_shapes__ -- -- * __feature_list_dense_values__ parseSingleSequenceExample = parseSingleSequenceExample' id parseSingleSequenceExample' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 context_sparse_types tcontext_dense feature_list_dense_types feature_list_sparse_types . 
(OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] context_sparse_types, OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] tcontext_dense, OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] feature_list_dense_types, OneOfs '[Data.ByteString.ByteString, Data.Int.Int64, Float] feature_list_sparse_types) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __serialized__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __feature_list_dense_missing_assumed_empty__ -> [Tensor v'3 Data.ByteString.ByteString] -- ^ __context_sparse_keys__ -> [Tensor v'4 Data.ByteString.ByteString] -- ^ __context_dense_keys__ -> [Tensor v'5 Data.ByteString.ByteString] -- ^ __feature_list_sparse_keys__ -> [Tensor v'6 Data.ByteString.ByteString] -- ^ __feature_list_dense_keys__ -> TensorList (v'7) tcontext_dense -- ^ __context_dense_defaults__ -> Tensor v'8 Data.ByteString.ByteString -- ^ __debug_name__ -> ([Tensor Build Data.Int.Int64], TensorList (Build) context_sparse_types, [Tensor Build Data.Int.Int64], TensorList (Build) tcontext_dense, [Tensor Build Data.Int.Int64], TensorList (Build) feature_list_sparse_types, [Tensor Build Data.Int.Int64], TensorList (Build) feature_list_dense_types) -- ^ (__context_sparse_indices__, __context_sparse_values__, __context_sparse_shapes__, __context_dense_values__, __feature_list_sparse_indices__, __feature_list_sparse_values__, __feature_list_sparse_shapes__, __feature_list_dense_values__) -- -- * __context_sparse_indices__ -- -- * __context_sparse_values__ -- -- * __context_sparse_shapes__ -- -- * __context_dense_values__ -- -- * __feature_list_sparse_indices__ -- -- * __feature_list_sparse_values__ -- -- * __feature_list_sparse_shapes__ -- -- * __feature_list_dense_values__ parseSingleSequenceExample' op'options serialized feature_list_dense_missing_assumed_empty context_sparse_keys context_dense_keys feature_list_sparse_keys feature_list_dense_keys context_dense_defaults debug_name | eqLengthGuard [("Ncontext_sparse", [("context_sparse_keys", length context_sparse_keys)]), ("Ncontext_dense", [("context_dense_keys", length context_dense_keys)]), ("Nfeature_list_sparse", [("feature_list_sparse_keys", length feature_list_sparse_keys)]), ("Nfeature_list_dense", [("feature_list_dense_keys", length feature_list_dense_keys)])] = pureOp [ncontext_sparse, ncontext_sparse, nfeature_list_sparse, nfeature_list_sparse] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs serialized, buildInputs feature_list_dense_missing_assumed_empty, buildInputs context_sparse_keys, buildInputs context_dense_keys, buildInputs feature_list_sparse_keys, buildInputs feature_list_dense_keys, buildInputs context_dense_defaults, buildInputs debug_name] return (opDef "ParseSingleSequenceExample" & opAttr "context_sparse_types" .~ fromTensorTypes (Proxy :: Proxy context_sparse_types) & opAttr "Tcontext_dense" .~ fromTensorTypes (Proxy :: Proxy tcontext_dense) & opAttr "feature_list_dense_types" .~ fromTensorTypes (Proxy :: Proxy feature_list_dense_types) & opAttr "feature_list_sparse_types" .~ fromTensorTypes (Proxy :: Proxy feature_list_sparse_types) & opAttr "Ncontext_sparse" .~ ncontext_sparse & opAttr "Ncontext_dense" .~ ncontext_dense & opAttr "Nfeature_list_sparse" .~ nfeature_list_sparse & opAttr "Nfeature_list_dense" .~ nfeature_list_dense & op'options & opInputs .~ op'inputs) where ncontext_sparse = fromIntegral (length context_sparse_keys) :: Int64 ncontext_dense = fromIntegral (length context_dense_keys) :: Int64 nfeature_list_sparse = fromIntegral 
(length feature_list_sparse_keys) :: Int64 nfeature_list_dense = fromIntegral (length feature_list_dense_keys) :: Int64 {- input_arg { name: "serialized" type: DT_STRING } input_arg { name: "feature_list_dense_missing_assumed_empty" type: DT_STRING } input_arg { name: "context_sparse_keys" type: DT_STRING number_attr: "Ncontext_sparse" } input_arg { name: "context_dense_keys" type: DT_STRING number_attr: "Ncontext_dense" } input_arg { name: "feature_list_sparse_keys" type: DT_STRING number_attr: "Nfeature_list_sparse" } input_arg { name: "feature_list_dense_keys" type: DT_STRING number_attr: "Nfeature_list_dense" } input_arg { name: "context_dense_defaults" type_list_attr: "Tcontext_dense" } input_arg { name: "debug_name" type: DT_STRING } output_arg { name: "context_sparse_indices" type: DT_INT64 number_attr: "Ncontext_sparse" } output_arg { name: "context_sparse_values" type_list_attr: "context_sparse_types" } output_arg { name: "context_sparse_shapes" type: DT_INT64 number_attr: "Ncontext_sparse" } output_arg { name: "context_dense_values" type_list_attr: "Tcontext_dense" } output_arg { name: "feature_list_sparse_indices" type: DT_INT64 number_attr: "Nfeature_list_sparse" } output_arg { name: "feature_list_sparse_values" type_list_attr: "feature_list_sparse_types" } output_arg { name: "feature_list_sparse_shapes" type: DT_INT64 number_attr: "Nfeature_list_sparse" } output_arg { name: "feature_list_dense_values" type_list_attr: "feature_list_dense_types" } attr { name: "Ncontext_sparse" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "Ncontext_dense" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "Nfeature_list_sparse" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "Nfeature_list_dense" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "context_sparse_types" type: "list(type)" default_value { list { } } has_minimum: true allowed_values { list { type: DT_FLOAT type: DT_INT64 type: DT_STRING } } } attr { name: "Tcontext_dense" type: "list(type)" default_value { list { } } has_minimum: true allowed_values { list { type: DT_FLOAT type: DT_INT64 type: DT_STRING } } } attr { name: "feature_list_dense_types" type: "list(type)" default_value { list { } } has_minimum: true allowed_values { list { type: DT_FLOAT type: DT_INT64 type: DT_STRING } } } attr { name: "context_dense_shapes" type: "list(shape)" default_value { list { } } has_minimum: true } attr { name: "feature_list_sparse_types" type: "list(type)" default_value { list { } } has_minimum: true allowed_values { list { type: DT_FLOAT type: DT_INT64 type: DT_STRING } } } attr { name: "feature_list_dense_shapes" type: "list(shape)" default_value { list { } } has_minimum: true } -} -- | parseTensor :: forall v'1 out_type . (TensorType out_type) => Tensor v'1 Data.ByteString.ByteString -- ^ __serialized__ -> Tensor Build out_type -- ^ __output__ parseTensor = parseTensor' id parseTensor' :: forall v'1 out_type . 
(TensorType out_type) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __serialized__ -> Tensor Build out_type -- ^ __output__ parseTensor' op'options serialized | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs serialized] return (opDef "ParseTensor" & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "serialized" type: DT_STRING } output_arg { name: "output" type_attr: "out_type" } attr { name: "out_type" type: "type" } -} -- | placeholder :: forall dtype . (TensorType dtype) => Tensor Build dtype -- ^ __output__ placeholder = placeholder' id placeholder' :: forall dtype . (TensorType dtype) => OpParams -> Tensor Build dtype -- ^ __output__ placeholder' op'options | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] return (opDef "Placeholder" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- output_arg { name: "output" type_attr: "dtype" } attr { name: "dtype" type: "type" } attr { name: "shape" type: "shape" default_value { shape { unknown_rank: true } } } -} -- | placeholderV2 :: forall dtype . (TensorType dtype) => Shape -- ^ __shape__ -> Tensor Build dtype -- ^ __output__ placeholderV2 = placeholderV2' id placeholderV2' :: forall dtype . (TensorType dtype) => OpParams -> Shape -- ^ __shape__ -> Tensor Build dtype -- ^ __output__ placeholderV2' op'options shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] return (opDef "PlaceholderV2" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- output_arg { name: "output" type_attr: "dtype" } attr { name: "dtype" type: "type" } attr { name: "shape" type: "shape" } -} -- | placeholderWithDefault :: forall v'1 dtype . (TensorType dtype) => Shape -- ^ __shape__ -> Tensor v'1 dtype -- ^ __input__ -> Tensor Build dtype -- ^ __output__ placeholderWithDefault = placeholderWithDefault' id placeholderWithDefault' :: forall v'1 dtype . (TensorType dtype) => OpParams -> Shape -- ^ __shape__ -> Tensor v'1 dtype -- ^ __input__ -> Tensor Build dtype -- ^ __output__ placeholderWithDefault' op'options shape input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "PlaceholderWithDefault" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "dtype" } output_arg { name: "output" type_attr: "dtype" } attr { name: "dtype" type: "type" } attr { name: "shape" type: "shape" } -} -- | polygamma :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __x__ -> Tensor Build t -- ^ __z__ polygamma = polygamma' id polygamma' :: forall v'1 v'2 t . 
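-- Note on the placeholder family: 'placeholder' leaves the shape attr at its
-- default (unknown rank); a concrete shape can be supplied through 'OpParams',
-- while 'placeholderV2' and 'placeholderWithDefault' take it positionally.
-- A minimal sketch ('Shape' wraps a list of Int64 dimensions):
--
-- > let p = placeholder' (opAttr "shape" .~ Shape [2, 2]) :: Tensor Build Float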
(OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __x__ -> Tensor Build t -- ^ __z__ polygamma' op'options a x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a, buildInputs x] return (opDef "Polygamma" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a" type_attr: "T" } input_arg { name: "x" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | populationCount :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build Data.Word.Word8 -- ^ __y__ populationCount = populationCount' id populationCount' :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build Data.Word.Word8 -- ^ __y__ populationCount' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "PopulationCount" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type: DT_UINT8 } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_UINT16 type: DT_UINT32 type: DT_UINT64 } } } -} -- | pow :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ pow = pow' id pow' :: forall v'1 v'2 t . 
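-- Note on 'populationCount': it counts the set bits of each element and always
-- yields Word8 (DT_UINT8) regardless of the input's integer type; e.g. the
-- popcount of 7 is 3 and of 8 is 1. A minimal sketch, assuming 'vector' from
-- TensorFlow.Ops:
--
-- > let bits = populationCount (vector [7, 8 :: Data.Int.Int32])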
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ pow' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "Pow" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_FLOAT type: DT_HALF type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | prefetchDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __buffer_size__ -> Tensor Build Variant -- ^ __handle__ prefetchDataset = prefetchDataset' id prefetchDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __buffer_size__ -> Tensor Build Variant -- ^ __handle__ prefetchDataset' op'options output_types input_dataset buffer_size | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs buffer_size] return (opDef "PrefetchDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "buffer_size" type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } attr { name: "slack_period" type: "int" default_value { i: 0 } } -} -- | prelinearize :: forall v'1 dtype . (TensorType dtype) => Tensor v'1 dtype -- ^ __input__ -> Tensor Build Variant -- ^ __output__ prelinearize = prelinearize' id prelinearize' :: forall v'1 dtype . (TensorType dtype) => OpParams -> Tensor v'1 dtype -- ^ __input__ -> Tensor Build Variant -- ^ __output__ prelinearize' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Prelinearize" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "dtype" } output_arg { name: "output" type: DT_VARIANT } attr { name: "dtype" type: "type" } attr { name: "shape" type: "shape" default_value { shape { } } } attr { name: "layout" type: "list(int)" default_value { list { } } } -} -- | prelinearizeTuple :: forall v'1 dtypes . (TensorTypes dtypes) => TensorList (v'1) dtypes -- ^ __inputs__ -> Tensor Build Variant -- ^ __output__ prelinearizeTuple = prelinearizeTuple' id prelinearizeTuple' :: forall v'1 dtypes . 
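-- Note on 'prefetchDataset': only output_types is positional; the equally
-- required output_shapes attr (and the optional slack_period) go through
-- 'OpParams'. A sketch, assuming an Attribute instance for [Shape] and the
-- DT_INT64 constructor from the proto DataType enum:
--
-- > prefetchDataset' (opAttr "output_shapes" .~ [Shape []])
-- >                  [DT_INT64] inputDataset bufferSize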
(TensorTypes dtypes) => OpParams -> TensorList (v'1) dtypes -- ^ __inputs__ -> Tensor Build Variant -- ^ __output__ prelinearizeTuple' op'options inputs | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs] return (opDef "PrelinearizeTuple" & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes) & op'options & opInputs .~ op'inputs) {- input_arg { name: "inputs" type_list_attr: "dtypes" } output_arg { name: "output" type: DT_VARIANT } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "shapes" type: "list(shape)" } attr { name: "layouts" type: "list(int)" default_value { list { } } } -} -- | preventGradient :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ preventGradient = preventGradient' id preventGradient' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ preventGradient' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "PreventGradient" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "message" type: "string" default_value { s: "" } } -} -- | print :: forall v'1 v'2 t u m' . (MonadBuild m', TensorType t, TensorTypes u) => Tensor v'1 t -- ^ __input__ -> TensorList (v'2) u -- ^ __data__ -> m' (Tensor Value t) -- ^ __output__ print = print' id print' :: forall v'1 v'2 t u m' . (MonadBuild m', TensorType t, TensorTypes u) => OpParams -> Tensor v'1 t -- ^ __input__ -> TensorList (v'2) u -- ^ __data__ -> m' (Tensor Value t) -- ^ __output__ print' op'options input data' | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs data'] buildOp [] (opDef "Print" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "U" .~ fromTensorTypes (Proxy :: Proxy u) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "data" type_list_attr: "U" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "U" type: "list(type)" has_minimum: true } attr { name: "message" type: "string" default_value { s: "" } } attr { name: "first_n" type: "int" default_value { i: -1 } } attr { name: "summarize" type: "int" default_value { i: 3 } } -} -- | printV2 :: forall v'1 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> m' (ControlNode) printV2 = printV2' id printV2' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> m' (ControlNode) printV2' op'options input | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "PrintV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_STRING } attr { name: "output_stream" type: "string" default_value { s: "stderr" } } attr { name: "end" type: "string" default_value { s: "\n" } } -} -- | priorityQueue :: forall m' . (MonadBuild m') => m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__ priorityQueue = priorityQueue' id priorityQueue' :: forall m' . 
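-- Note on 'print' / 'printV2': 'print' passes its first input through
-- unchanged and logs the attached TensorList as a side effect, hence the
-- MonadBuild result; 'printV2' logs one pre-formatted string. A sketch,
-- assuming the (:/) and Nil constructors of ListOf from TensorFlow.Types:
--
-- > out <- print' (opAttr "message" .~ ("x is: " :: ByteString)) x (x :/ Nil)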
(MonadBuild m') => OpParams -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__ priorityQueue' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "PriorityQueue" & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" type: DT_STRING is_ref: true } attr { name: "component_types" type: "list(type)" default_value { list { } } has_minimum: true } attr { name: "shapes" type: "list(shape)" has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | priorityQueueV2 :: forall m' . (MonadBuild m') => m' (Tensor Value ResourceHandle) -- ^ __handle__ priorityQueueV2 = priorityQueueV2' id priorityQueueV2' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Value ResourceHandle) -- ^ __handle__ priorityQueueV2' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "PriorityQueueV2" & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" type: DT_RESOURCE } attr { name: "component_types" type: "list(type)" default_value { list { } } has_minimum: true } attr { name: "shapes" type: "list(shape)" has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | prod :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 tidx -- ^ __reduction_indices__ -> Tensor Build t -- ^ __output__ prod = prod' id prod' :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tidx -- ^ __reduction_indices__ -> Tensor Build t -- ^ __output__ prod' op'options input reduction_indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs reduction_indices] return (opDef "Prod" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "reduction_indices" type_attr: "Tidx" } output_arg { name: "output" type_attr: "T" } attr { name: "keep_dims" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | pyFunc :: forall v'1 tin tout m' . 
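-- Note on 'prod': it multiplies elements along reduction_indices and drops
-- those axes unless keep_dims is set. A minimal sketch, assuming 'scalar'
-- from TensorFlow.Ops:
--
-- > let total = prod' (opAttr "keep_dims" .~ True)
-- >                   input (scalar (0 :: Data.Int.Int32))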
(MonadBuild m', TensorTypes tin, TensorTypes tout) => TensorList (v'1) tin -- ^ __input__ -> m' (TensorList (Value) tout) -- ^ __output__ pyFunc = pyFunc' id pyFunc' :: forall v'1 tin tout m' . (MonadBuild m', TensorTypes tin, TensorTypes tout) => OpParams -> TensorList (v'1) tin -- ^ __input__ -> m' (TensorList (Value) tout) -- ^ __output__ pyFunc' op'options input | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "PyFunc" & opAttr "Tin" .~ fromTensorTypes (Proxy :: Proxy tin) & opAttr "Tout" .~ fromTensorTypes (Proxy :: Proxy tout) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_list_attr: "Tin" } output_arg { name: "output" type_list_attr: "Tout" } attr { name: "token" type: "string" } attr { name: "Tin" type: "list(type)" has_minimum: true } attr { name: "Tout" type: "list(type)" has_minimum: true } -} -- | pyFuncStateless :: forall v'1 tin tout . (TensorTypes tin, TensorTypes tout) => TensorList (v'1) tin -- ^ __input__ -> TensorList (Build) tout -- ^ __output__ pyFuncStateless = pyFuncStateless' id pyFuncStateless' :: forall v'1 tin tout . (TensorTypes tin, TensorTypes tout) => OpParams -> TensorList (v'1) tin -- ^ __input__ -> TensorList (Build) tout -- ^ __output__ pyFuncStateless' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "PyFuncStateless" & opAttr "Tin" .~ fromTensorTypes (Proxy :: Proxy tin) & opAttr "Tout" .~ fromTensorTypes (Proxy :: Proxy tout) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_list_attr: "Tin" } output_arg { name: "output" type_list_attr: "Tout" } attr { name: "token" type: "string" } attr { name: "Tin" type: "list(type)" has_minimum: true } attr { name: "Tout" type: "list(type)" has_minimum: true } -} -- | qr :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> (Tensor Build t, Tensor Build t) -- ^ (__q__, __r__) -- -- * __q__ -- -- * __r__ qr = qr' id qr' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> (Tensor Build t, Tensor Build t) -- ^ (__q__, __r__) -- -- * __q__ -- -- * __r__ qr' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Qr" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "q" type_attr: "T" } output_arg { name: "r" type_attr: "T" } attr { name: "full_matrices" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_HALF type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | quantizeAndDequantize :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ quantizeAndDequantize = quantizeAndDequantize' id quantizeAndDequantize' :: forall v'1 t . 
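-- Note on 'qr': it factors the innermost matrices of the input into an
-- orthogonal q and an upper-triangular r; the default full_matrices = false
-- gives the reduced factorization. A minimal sketch:
--
-- > let (q, r) = qr' (opAttr "full_matrices" .~ True) input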
(OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ quantizeAndDequantize' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "QuantizeAndDequantize" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "signed_input" type: "bool" default_value { b: true } } attr { name: "num_bits" type: "int" default_value { i: 8 } } attr { name: "range_given" type: "bool" default_value { b: false } } attr { name: "input_min" type: "float" default_value { f: 0.0 } } attr { name: "input_max" type: "float" default_value { f: 0.0 } } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | quantizeAndDequantizeV2 :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __input_min__ -> Tensor v'3 t -- ^ __input_max__ -> Tensor Build t -- ^ __output__ quantizeAndDequantizeV2 = quantizeAndDequantizeV2' id quantizeAndDequantizeV2' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __input_min__ -> Tensor v'3 t -- ^ __input_max__ -> Tensor Build t -- ^ __output__ quantizeAndDequantizeV2' op'options input input_min input_max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs input_min, buildInputs input_max] return (opDef "QuantizeAndDequantizeV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "input_min" type_attr: "T" } input_arg { name: "input_max" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "signed_input" type: "bool" default_value { b: true } } attr { name: "num_bits" type: "int" default_value { i: 8 } } attr { name: "range_given" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "round_mode" type: "string" default_value { s: "HALF_TO_EVEN" } allowed_values { list { s: "HALF_TO_EVEN" s: "HALF_UP" } } } -} -- | quantizeAndDequantizeV3 :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __input_min__ -> Tensor v'3 t -- ^ __input_max__ -> Tensor v'4 Data.Int.Int32 -- ^ __num_bits__ -> Tensor Build t -- ^ __output__ quantizeAndDequantizeV3 = quantizeAndDequantizeV3' id quantizeAndDequantizeV3' :: forall v'1 v'2 v'3 v'4 t . 
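-- Note on 'quantizeAndDequantizeV2': it simulates quantization error without
-- changing the dtype, quantizing to num_bits over [input_min, input_max]
-- (when range_given is set) and immediately dequantizing. A sketch:
--
-- > let y = quantizeAndDequantizeV2'
-- >           ( (opAttr "range_given" .~ True)
-- >           . (opAttr "num_bits" .~ (8 :: Data.Int.Int64)) )
-- >           x xMin xMax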
(OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __input_min__ -> Tensor v'3 t -- ^ __input_max__ -> Tensor v'4 Data.Int.Int32 -- ^ __num_bits__ -> Tensor Build t -- ^ __output__ quantizeAndDequantizeV3' op'options input input_min input_max num_bits | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs input_min, buildInputs input_max, buildInputs num_bits] return (opDef "QuantizeAndDequantizeV3" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "input_min" type_attr: "T" } input_arg { name: "input_max" type_attr: "T" } input_arg { name: "num_bits" type: DT_INT32 } output_arg { name: "output" type_attr: "T" } attr { name: "signed_input" type: "bool" default_value { b: true } } attr { name: "range_given" type: "bool" default_value { b: true } } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | quantizeDownAndShrinkRange :: forall v'1 v'2 v'3 tinput out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 Float -- ^ __input_min__ -> Tensor v'3 Float -- ^ __input_max__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __output_min__, __output_max__) -- -- * __output__ -- -- * __output_min__ -- -- * __output_max__ quantizeDownAndShrinkRange = quantizeDownAndShrinkRange' id quantizeDownAndShrinkRange' :: forall v'1 v'2 v'3 tinput out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 Float -- ^ __input_min__ -> Tensor v'3 Float -- ^ __input_max__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __output_min__, __output_max__) -- -- * __output__ -- -- * __output_min__ -- -- * __output_max__ quantizeDownAndShrinkRange' op'options input input_min input_max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs input_min, buildInputs input_max] return (opDef "QuantizeDownAndShrinkRange" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "input_min" type: DT_FLOAT } input_arg { name: "input_max" type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "output_min" type: DT_FLOAT } output_arg { name: "output_max" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "out_type" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } -} -- | quantizeV2 :: forall v'1 v'2 v'3 t . 
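-- Note on 'quantizeDownAndShrinkRange': it requantizes, e.g. a qint32
-- accumulator, into a narrower type while tightening the float range to the
-- values actually present; the annotation picks out_type (Word8 standing in
-- for quint8 here). A sketch:
--
-- > let (out, outMin, outMax) =
-- >       quantizeDownAndShrinkRange acc accMin accMax
-- >         :: (Tensor Build Data.Word.Word8, Tensor Build Float, Tensor Build Float)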
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t) => Tensor v'1 Float -- ^ __input__ -> Tensor v'2 Float -- ^ __min_range__ -> Tensor v'3 Float -- ^ __max_range__ -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __output_min__, __output_max__) -- -- * __output__ -- -- * __output_min__ -- -- * __output_max__ quantizeV2 = quantizeV2' id quantizeV2' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t) => OpParams -> Tensor v'1 Float -- ^ __input__ -> Tensor v'2 Float -- ^ __min_range__ -> Tensor v'3 Float -- ^ __max_range__ -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __output_min__, __output_max__) -- -- * __output__ -- -- * __output_min__ -- -- * __output_max__ quantizeV2' op'options input min_range max_range | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs min_range, buildInputs max_range] return (opDef "QuantizeV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_FLOAT } input_arg { name: "min_range" type: DT_FLOAT } input_arg { name: "max_range" type: DT_FLOAT } output_arg { name: "output" type_attr: "T" } output_arg { name: "output_min" type: DT_FLOAT } output_arg { name: "output_max" type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "mode" type: "string" default_value { s: "MIN_COMBINED" } allowed_values { list { s: "MIN_COMBINED" s: "MIN_FIRST" s: "SCALED" } } } attr { name: "round_mode" type: "string" default_value { s: "HALF_AWAY_FROM_ZERO" } allowed_values { list { s: "HALF_AWAY_FROM_ZERO" s: "HALF_TO_EVEN" } } } -} -- | quantizedAdd :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2 toutput . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t1, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t2, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] toutput) => Tensor v'1 t1 -- ^ __x__ -> Tensor v'2 t2 -- ^ __y__ -> Tensor v'3 Float -- ^ __min_x__ -> Tensor v'4 Float -- ^ __max_x__ -> Tensor v'5 Float -- ^ __min_y__ -> Tensor v'6 Float -- ^ __max_y__ -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float) -- ^ (__z__, __min_z__, __max_z__) -- -- * __z__ -- -- * __min_z__ -- -- * __max_z__ quantizedAdd = quantizedAdd' id quantizedAdd' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2 toutput . 
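-- Note on 'quantizeV2': it converts float data into the quantized type chosen
-- by the result annotation and returns the range actually used; mode and
-- round_mode are string attrs. A sketch:
--
-- > let (q, qMin, qMax) =
-- >       quantizeV2' (opAttr "mode" .~ ("SCALED" :: ByteString))
-- >                   input minRange maxRange
-- >         :: (Tensor Build Data.Word.Word8, Tensor Build Float, Tensor Build Float)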
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t1, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t2, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] toutput) => OpParams -> Tensor v'1 t1 -- ^ __x__ -> Tensor v'2 t2 -- ^ __y__ -> Tensor v'3 Float -- ^ __min_x__ -> Tensor v'4 Float -- ^ __max_x__ -> Tensor v'5 Float -- ^ __min_y__ -> Tensor v'6 Float -- ^ __max_y__ -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float) -- ^ (__z__, __min_z__, __max_z__) -- -- * __z__ -- -- * __min_z__ -- -- * __max_z__ quantizedAdd' op'options x y min_x max_x min_y max_y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y, buildInputs min_x, buildInputs max_x, buildInputs min_y, buildInputs max_y] return (opDef "QuantizedAdd" & opAttr "T1" .~ tensorType (undefined :: t1) & opAttr "T2" .~ tensorType (undefined :: t2) & opAttr "Toutput" .~ tensorType (undefined :: toutput) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T1" } input_arg { name: "y" type_attr: "T2" } input_arg { name: "min_x" type: DT_FLOAT } input_arg { name: "max_x" type: DT_FLOAT } input_arg { name: "min_y" type: DT_FLOAT } input_arg { name: "max_y" type: DT_FLOAT } output_arg { name: "z" type_attr: "Toutput" } output_arg { name: "min_z" type: DT_FLOAT } output_arg { name: "max_z" type: DT_FLOAT } attr { name: "T1" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "T2" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "Toutput" type: "type" default_value { type: DT_QINT32 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } -} -- | quantizedAvgPool :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 Float -- ^ __min_input__ -> Tensor v'3 Float -- ^ __max_input__ -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedAvgPool = quantizedAvgPool' id quantizedAvgPool' :: forall v'1 v'2 v'3 t . 
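-- Note on 'quantizedAdd': the operands may use different quantized
-- representations (T1, T2); the result defaults to the wider qint32
-- (Int32 here) plus its float range, since the sum can exceed either
-- input's range. A sketch:
--
-- > let (z, zMin, zMax) = quantizedAdd x y minX maxX minY maxY
-- >       :: (Tensor Build Data.Int.Int32, Tensor Build Float, Tensor Build Float)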
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 Float -- ^ __min_input__ -> Tensor v'3 Float -- ^ __max_input__ -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedAvgPool' op'options input min_input max_input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs min_input, buildInputs max_input] return (opDef "QuantizedAvgPool" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "min_input" type: DT_FLOAT } input_arg { name: "max_input" type: DT_FLOAT } output_arg { name: "output" type_attr: "T" } output_arg { name: "min_output" type: DT_FLOAT } output_arg { name: "max_output" type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "ksize" type: "list(int)" } attr { name: "strides" type: "list(int)" } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } -} -- | quantizedBatchNormWithGlobalNormalization :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 v'11 v'12 v'13 v'14 v'15 tinput out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Bool -- ^ __scale_after_normalization__ -> Float -- ^ __variance_epsilon__ -> Tensor v'1 tinput -- ^ __t__ -> Tensor v'2 Float -- ^ __t_min__ -> Tensor v'3 Float -- ^ __t_max__ -> Tensor v'4 tinput -- ^ __m__ -> Tensor v'5 Float -- ^ __m_min__ -> Tensor v'6 Float -- ^ __m_max__ -> Tensor v'7 tinput -- ^ __v__ -> Tensor v'8 Float -- ^ __v_min__ -> Tensor v'9 Float -- ^ __v_max__ -> Tensor v'10 tinput -- ^ __beta__ -> Tensor v'11 Float -- ^ __beta_min__ -> Tensor v'12 Float -- ^ __beta_max__ -> Tensor v'13 tinput -- ^ __gamma__ -> Tensor v'14 Float -- ^ __gamma_min__ -> Tensor v'15 Float -- ^ __gamma_max__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__result__, __result_min__, __result_max__) -- -- * __result__ -- -- * __result_min__ -- -- * __result_max__ quantizedBatchNormWithGlobalNormalization = quantizedBatchNormWithGlobalNormalization' id quantizedBatchNormWithGlobalNormalization' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 v'11 v'12 v'13 v'14 v'15 tinput out_type . 
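-- Note on 'quantizedAvgPool': its required ksize, strides, and padding attrs
-- have no positional arguments and must all be supplied via 'OpParams',
-- composed with (.). A sketch:
--
-- > quantizedAvgPool'
-- >   ( (opAttr "ksize"   .~ ([1, 2, 2, 1] :: [Data.Int.Int64]))
-- >   . (opAttr "strides" .~ ([1, 2, 2, 1] :: [Data.Int.Int64]))
-- >   . (opAttr "padding" .~ ("VALID" :: ByteString)) )
-- >   input minInput maxInput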
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Bool -- ^ __scale_after_normalization__ -> Float -- ^ __variance_epsilon__ -> Tensor v'1 tinput -- ^ __t__ -> Tensor v'2 Float -- ^ __t_min__ -> Tensor v'3 Float -- ^ __t_max__ -> Tensor v'4 tinput -- ^ __m__ -> Tensor v'5 Float -- ^ __m_min__ -> Tensor v'6 Float -- ^ __m_max__ -> Tensor v'7 tinput -- ^ __v__ -> Tensor v'8 Float -- ^ __v_min__ -> Tensor v'9 Float -- ^ __v_max__ -> Tensor v'10 tinput -- ^ __beta__ -> Tensor v'11 Float -- ^ __beta_min__ -> Tensor v'12 Float -- ^ __beta_max__ -> Tensor v'13 tinput -- ^ __gamma__ -> Tensor v'14 Float -- ^ __gamma_min__ -> Tensor v'15 Float -- ^ __gamma_max__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__result__, __result_min__, __result_max__) -- -- * __result__ -- -- * __result_min__ -- -- * __result_max__ quantizedBatchNormWithGlobalNormalization' op'options scale_after_normalization variance_epsilon t t_min t_max m m_min m_max v v_min v_max beta beta_min beta_max gamma gamma_min gamma_max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs t, buildInputs t_min, buildInputs t_max, buildInputs m, buildInputs m_min, buildInputs m_max, buildInputs v, buildInputs v_min, buildInputs v_max, buildInputs beta, buildInputs beta_min, buildInputs beta_max, buildInputs gamma, buildInputs gamma_min, buildInputs gamma_max] return (opDef "QuantizedBatchNormWithGlobalNormalization" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "out_type" .~ tensorType (undefined :: out_type) & opAttr "scale_after_normalization" .~ scale_after_normalization & opAttr "variance_epsilon" .~ variance_epsilon & op'options & opInputs .~ op'inputs) {- input_arg { name: "t" type_attr: "Tinput" } input_arg { name: "t_min" type: DT_FLOAT } input_arg { name: "t_max" type: DT_FLOAT } input_arg { name: "m" type_attr: "Tinput" } input_arg { name: "m_min" type: DT_FLOAT } input_arg { name: "m_max" type: DT_FLOAT } input_arg { name: "v" type_attr: "Tinput" } input_arg { name: "v_min" type: DT_FLOAT } input_arg { name: "v_max" type: DT_FLOAT } input_arg { name: "beta" type_attr: "Tinput" } input_arg { name: "beta_min" type: DT_FLOAT } input_arg { name: "beta_max" type: DT_FLOAT } input_arg { name: "gamma" type_attr: "Tinput" } input_arg { name: "gamma_min" type: DT_FLOAT } input_arg { name: "gamma_max" type: DT_FLOAT } output_arg { name: "result" type_attr: "out_type" } output_arg { name: "result_min" type: DT_FLOAT } output_arg { name: "result_max" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "out_type" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "variance_epsilon" type: "float" } attr { name: "scale_after_normalization" type: "bool" } -} -- | quantizedBiasAdd :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2 out_type . 
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t1, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t2, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 t1 -- ^ __input__ -> Tensor v'2 t2 -- ^ __bias__ -> Tensor v'3 Float -- ^ __min_input__ -> Tensor v'4 Float -- ^ __max_input__ -> Tensor v'5 Float -- ^ __min_bias__ -> Tensor v'6 Float -- ^ __max_bias__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_out__, __max_out__) -- -- * __output__ -- -- * __min_out__ -- -- * __max_out__ quantizedBiasAdd = quantizedBiasAdd' id quantizedBiasAdd' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2 out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t1, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t2, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 t1 -- ^ __input__ -> Tensor v'2 t2 -- ^ __bias__ -> Tensor v'3 Float -- ^ __min_input__ -> Tensor v'4 Float -- ^ __max_input__ -> Tensor v'5 Float -- ^ __min_bias__ -> Tensor v'6 Float -- ^ __max_bias__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_out__, __max_out__) -- -- * __output__ -- -- * __min_out__ -- -- * __max_out__ quantizedBiasAdd' op'options input bias min_input max_input min_bias max_bias | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs bias, buildInputs min_input, buildInputs max_input, buildInputs min_bias, buildInputs max_bias] return (opDef "QuantizedBiasAdd" & opAttr "T1" .~ tensorType (undefined :: t1) & opAttr "T2" .~ tensorType (undefined :: t2) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T1" } input_arg { name: "bias" type_attr: "T2" } input_arg { name: "min_input" type: DT_FLOAT } input_arg { name: "max_input" type: DT_FLOAT } input_arg { name: "min_bias" type: DT_FLOAT } input_arg { name: "max_bias" type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "min_out" type: DT_FLOAT } output_arg { name: "max_out" type: DT_FLOAT } attr { name: "T1" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "T2" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "out_type" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } -} -- | quantizedConcat :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => Tensor v'1 Data.Int.Int32 -- ^ __concat_dim__ -> [Tensor v'2 t] -- ^ __values__ -> [Tensor v'3 Float] -- ^ __input_mins__ -> [Tensor v'4 Float] -- ^ __input_maxes__ -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __output_min__, __output_max__) -- -- * __output__ -- -- * __output_min__ -- -- * __output_max__ quantizedConcat = quantizedConcat' id quantizedConcat' :: forall v'1 v'2 v'3 v'4 t . 
(TensorType t) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __concat_dim__ -> [Tensor v'2 t] -- ^ __values__ -> [Tensor v'3 Float] -- ^ __input_mins__ -> [Tensor v'4 Float] -- ^ __input_maxes__ -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __output_min__, __output_max__) -- -- * __output__ -- -- * __output_min__ -- -- * __output_max__ quantizedConcat' op'options concat_dim values input_mins input_maxes | eqLengthGuard [("N", [("values", length values), ("input_mins", length input_mins), ("input_maxes", length input_maxes)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs concat_dim, buildInputs values, buildInputs input_mins, buildInputs input_maxes] return (opDef "QuantizedConcat" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length values) :: Int64 {- input_arg { name: "concat_dim" type: DT_INT32 } input_arg { name: "values" type_attr: "T" number_attr: "N" } input_arg { name: "input_mins" type: DT_FLOAT number_attr: "N" } input_arg { name: "input_maxes" type: DT_FLOAT number_attr: "N" } output_arg { name: "output" type_attr: "T" } output_arg { name: "output_min" type: DT_FLOAT } output_arg { name: "output_max" type: DT_FLOAT } attr { name: "N" type: "int" has_minimum: true minimum: 2 } attr { name: "T" type: "type" } -} -- | quantizedConv2D :: forall v'1 v'2 v'3 v'4 v'5 v'6 tinput tfilter out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 Float -- ^ __min_input__ -> Tensor v'4 Float -- ^ __max_input__ -> Tensor v'5 Float -- ^ __min_filter__ -> Tensor v'6 Float -- ^ __max_filter__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedConv2D = quantizedConv2D' id quantizedConv2D' :: forall v'1 v'2 v'3 v'4 v'5 v'6 tinput tfilter out_type . 
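-- Note on 'quantizedConcat': the eqLengthGuard above enforces that values,
-- input_mins, and input_maxes all have the same length (and N must be at
-- least 2); the per-input ranges are merged into a single output range.
-- A sketch, assuming 'scalar' from TensorFlow.Ops for the range inputs:
--
-- > quantizedConcat concatDim [a, b]
-- >                 [scalar aMin, scalar bMin] [scalar aMax, scalar bMax]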
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 Float -- ^ __min_input__ -> Tensor v'4 Float -- ^ __max_input__ -> Tensor v'5 Float -- ^ __min_filter__ -> Tensor v'6 Float -- ^ __max_filter__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedConv2D' op'options input filter min_input max_input min_filter max_filter | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs min_input, buildInputs max_input, buildInputs min_filter, buildInputs max_filter] return (opDef "QuantizedConv2D" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "Tfilter" .~ tensorType (undefined :: tfilter) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "filter" type_attr: "Tfilter" } input_arg { name: "min_input" type: DT_FLOAT } input_arg { name: "max_input" type: DT_FLOAT } input_arg { name: "min_filter" type: DT_FLOAT } input_arg { name: "max_filter" type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "min_output" type: DT_FLOAT } output_arg { name: "max_output" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "Tfilter" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "out_type" type: "type" default_value { type: DT_QINT32 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "strides" type: "list(int)" } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 } } } -} -- | quantizedConv2DAndRelu :: forall v'1 v'2 v'3 v'4 v'5 v'6 tinput tfilter out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 Float -- ^ __min_input__ -> Tensor v'4 Float -- ^ __max_input__ -> Tensor v'5 Float -- ^ __min_filter__ -> Tensor v'6 Float -- ^ __max_filter__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedConv2DAndRelu = quantizedConv2DAndRelu' id quantizedConv2DAndRelu' :: forall v'1 v'2 v'3 v'4 v'5 v'6 tinput tfilter out_type . 
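-- Note on 'quantizedConv2D': it mirrors Conv2D for quantized inputs; strides
-- and padding are required attrs set through 'OpParams', and the qint32
-- default out_type keeps headroom for the accumulation. The fused variants
-- below (...AndRelu, ...AndRequantize, ...WithBias...) follow the same
-- calling pattern with extra positional inputs. A sketch:
--
-- > quantizedConv2D'
-- >   ( (opAttr "strides" .~ ([1, 1, 1, 1] :: [Data.Int.Int64]))
-- >   . (opAttr "padding" .~ ("SAME" :: ByteString)) )
-- >   input filter minInput maxInput minFilter maxFilter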
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 Float -- ^ __min_input__ -> Tensor v'4 Float -- ^ __max_input__ -> Tensor v'5 Float -- ^ __min_filter__ -> Tensor v'6 Float -- ^ __max_filter__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedConv2DAndRelu' op'options input filter min_input max_input min_filter max_filter | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs min_input, buildInputs max_input, buildInputs min_filter, buildInputs max_filter] return (opDef "QuantizedConv2DAndRelu" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "Tfilter" .~ tensorType (undefined :: tfilter) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "filter" type_attr: "Tfilter" } input_arg { name: "min_input" type: DT_FLOAT } input_arg { name: "max_input" type: DT_FLOAT } input_arg { name: "min_filter" type: DT_FLOAT } input_arg { name: "max_filter" type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "min_output" type: DT_FLOAT } output_arg { name: "max_output" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "Tfilter" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "out_type" type: "type" default_value { type: DT_QINT32 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "strides" type: "list(int)" } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 } } } attr { name: "padding_list" type: "list(int)" default_value { list { } } } -} -- | quantizedConv2DAndReluAndRequantize :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 tinput tfilter out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 Float -- ^ __min_input__ -> Tensor v'4 Float -- ^ __max_input__ -> Tensor v'5 Float -- ^ __min_filter__ -> Tensor v'6 Float -- ^ __max_filter__ -> Tensor v'7 Float -- ^ __min_freezed_output__ -> Tensor v'8 Float -- ^ __max_freezed_output__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedConv2DAndReluAndRequantize = quantizedConv2DAndReluAndRequantize' id quantizedConv2DAndReluAndRequantize' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 tinput tfilter out_type . 
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 Float -- ^ __min_input__ -> Tensor v'4 Float -- ^ __max_input__ -> Tensor v'5 Float -- ^ __min_filter__ -> Tensor v'6 Float -- ^ __max_filter__ -> Tensor v'7 Float -- ^ __min_freezed_output__ -> Tensor v'8 Float -- ^ __max_freezed_output__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedConv2DAndReluAndRequantize' op'options input filter min_input max_input min_filter max_filter min_freezed_output max_freezed_output | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs min_input, buildInputs max_input, buildInputs min_filter, buildInputs max_filter, buildInputs min_freezed_output, buildInputs max_freezed_output] return (opDef "QuantizedConv2DAndReluAndRequantize" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "Tfilter" .~ tensorType (undefined :: tfilter) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "filter" type_attr: "Tfilter" } input_arg { name: "min_input" type: DT_FLOAT } input_arg { name: "max_input" type: DT_FLOAT } input_arg { name: "min_filter" type: DT_FLOAT } input_arg { name: "max_filter" type: DT_FLOAT } input_arg { name: "min_freezed_output" type: DT_FLOAT } input_arg { name: "max_freezed_output" type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "min_output" type: DT_FLOAT } output_arg { name: "max_output" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "Tfilter" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "out_type" type: "type" default_value { type: DT_QUINT8 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "strides" type: "list(int)" } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 } } } attr { name: "padding_list" type: "list(int)" default_value { list { } } } -} -- | quantizedConv2DAndRequantize :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 tinput tfilter out_type . 
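-- Note on the ...AndRequantize variants: they additionally take
-- min_freezed_output / max_freezed_output, a fixed output range determined
-- ahead of time (e.g. by calibration), and requantize the qint32 accumulator
-- into the narrower out_type (default quint8, i.e. Word8 here) in one fused
-- step. A sketch:
--
-- > quantizedConv2DAndReluAndRequantize'
-- >   ( (opAttr "strides" .~ ([1, 1, 1, 1] :: [Data.Int.Int64]))
-- >   . (opAttr "padding" .~ ("SAME" :: ByteString)) )
-- >   input filter minInput maxInput minFilter maxFilter minFz maxFz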
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 Float -- ^ __min_input__ -> Tensor v'4 Float -- ^ __max_input__ -> Tensor v'5 Float -- ^ __min_filter__ -> Tensor v'6 Float -- ^ __max_filter__ -> Tensor v'7 Float -- ^ __min_freezed_output__ -> Tensor v'8 Float -- ^ __max_freezed_output__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedConv2DAndRequantize = quantizedConv2DAndRequantize' id quantizedConv2DAndRequantize' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 tinput tfilter out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 Float -- ^ __min_input__ -> Tensor v'4 Float -- ^ __max_input__ -> Tensor v'5 Float -- ^ __min_filter__ -> Tensor v'6 Float -- ^ __max_filter__ -> Tensor v'7 Float -- ^ __min_freezed_output__ -> Tensor v'8 Float -- ^ __max_freezed_output__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedConv2DAndRequantize' op'options input filter min_input max_input min_filter max_filter min_freezed_output max_freezed_output | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs min_input, buildInputs max_input, buildInputs min_filter, buildInputs max_filter, buildInputs min_freezed_output, buildInputs max_freezed_output] return (opDef "QuantizedConv2DAndRequantize" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "Tfilter" .~ tensorType (undefined :: tfilter) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "filter" type_attr: "Tfilter" } input_arg { name: "min_input" type: DT_FLOAT } input_arg { name: "max_input" type: DT_FLOAT } input_arg { name: "min_filter" type: DT_FLOAT } input_arg { name: "max_filter" type: DT_FLOAT } input_arg { name: "min_freezed_output" type: DT_FLOAT } input_arg { name: "max_freezed_output" type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "min_output" type: DT_FLOAT } output_arg { name: "max_output" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "Tfilter" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "out_type" type: "type" default_value { type: DT_QINT8 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "strides" type: "list(int)" } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "dilations" 
type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 } } } attr { name: "padding_list" type: "list(int)" default_value { list { } } } -} -- | quantizedConv2DPerChannel :: forall v'1 v'2 v'3 v'4 v'5 v'6 tinput tfilter out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 Float -- ^ __min_input__ -> Tensor v'4 Float -- ^ __max_input__ -> Tensor v'5 Float -- ^ __min_filter__ -> Tensor v'6 Float -- ^ __max_filter__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedConv2DPerChannel = quantizedConv2DPerChannel' id quantizedConv2DPerChannel' :: forall v'1 v'2 v'3 v'4 v'5 v'6 tinput tfilter out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 Float -- ^ __min_input__ -> Tensor v'4 Float -- ^ __max_input__ -> Tensor v'5 Float -- ^ __min_filter__ -> Tensor v'6 Float -- ^ __max_filter__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedConv2DPerChannel' op'options input filter min_input max_input min_filter max_filter | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs min_input, buildInputs max_input, buildInputs min_filter, buildInputs max_filter] return (opDef "QuantizedConv2DPerChannel" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "Tfilter" .~ tensorType (undefined :: tfilter) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "filter" type_attr: "Tfilter" } input_arg { name: "min_input" type: DT_FLOAT } input_arg { name: "max_input" type: DT_FLOAT } input_arg { name: "min_filter" type: DT_FLOAT } input_arg { name: "max_filter" type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "min_output" type: DT_FLOAT } output_arg { name: "max_output" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "Tfilter" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "out_type" type: "type" default_value { type: DT_QINT32 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "strides" type: "list(int)" } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 } } } -} -- | quantizedConv2DWithBias :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 tinput tfilter out_type . 
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 Float -- ^ __bias__ -> Tensor v'4 Float -- ^ __min_input__ -> Tensor v'5 Float -- ^ __max_input__ -> Tensor v'6 Float -- ^ __min_filter__ -> Tensor v'7 Float -- ^ __max_filter__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedConv2DWithBias = quantizedConv2DWithBias' id quantizedConv2DWithBias' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 tinput tfilter out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 Float -- ^ __bias__ -> Tensor v'4 Float -- ^ __min_input__ -> Tensor v'5 Float -- ^ __max_input__ -> Tensor v'6 Float -- ^ __min_filter__ -> Tensor v'7 Float -- ^ __max_filter__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedConv2DWithBias' op'options input filter bias min_input max_input min_filter max_filter | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs bias, buildInputs min_input, buildInputs max_input, buildInputs min_filter, buildInputs max_filter] return (opDef "QuantizedConv2DWithBias" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "Tfilter" .~ tensorType (undefined :: tfilter) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "filter" type_attr: "Tfilter" } input_arg { name: "bias" type: DT_FLOAT } input_arg { name: "min_input" type: DT_FLOAT } input_arg { name: "max_input" type: DT_FLOAT } input_arg { name: "min_filter" type: DT_FLOAT } input_arg { name: "max_filter" type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "min_output" type: DT_FLOAT } output_arg { name: "max_output" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "Tfilter" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "out_type" type: "type" default_value { type: DT_QINT32 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "strides" type: "list(int)" } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 } } } attr { name: "padding_list" type: "list(int)" default_value { list { } } } -} -- | quantizedConv2DWithBiasAndRelu :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 tinput tfilter out_type . 
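{- Note (hand-written): the "out_type" attr is never passed explicitly; it is
   filled in from the out_type type variable via tensorType, so the caller
   selects the output kind with a type annotation on the result. Reusing
   convParams from the sketch above, and picking Data.Int.Int32, the 32-bit
   entry of the OneOf list these signatures use where the OpDef allows
   DT_QINT32:

     (out, minOut, maxOut) =
         quantizedConv2DWithBiasAndRelu' convParams
             inp filt bias minIn maxIn minFilt maxFilt
           :: (Tensor Build Data.Int.Int32, Tensor Build Float, Tensor Build Float)
-}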
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 Float -- ^ __bias__ -> Tensor v'4 Float -- ^ __min_input__ -> Tensor v'5 Float -- ^ __max_input__ -> Tensor v'6 Float -- ^ __min_filter__ -> Tensor v'7 Float -- ^ __max_filter__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedConv2DWithBiasAndRelu = quantizedConv2DWithBiasAndRelu' id quantizedConv2DWithBiasAndRelu' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 tinput tfilter out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 Float -- ^ __bias__ -> Tensor v'4 Float -- ^ __min_input__ -> Tensor v'5 Float -- ^ __max_input__ -> Tensor v'6 Float -- ^ __min_filter__ -> Tensor v'7 Float -- ^ __max_filter__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedConv2DWithBiasAndRelu' op'options input filter bias min_input max_input min_filter max_filter | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs bias, buildInputs min_input, buildInputs max_input, buildInputs min_filter, buildInputs max_filter] return (opDef "QuantizedConv2DWithBiasAndRelu" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "Tfilter" .~ tensorType (undefined :: tfilter) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "filter" type_attr: "Tfilter" } input_arg { name: "bias" type: DT_FLOAT } input_arg { name: "min_input" type: DT_FLOAT } input_arg { name: "max_input" type: DT_FLOAT } input_arg { name: "min_filter" type: DT_FLOAT } input_arg { name: "max_filter" type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "min_output" type: DT_FLOAT } output_arg { name: "max_output" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "Tfilter" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "out_type" type: "type" default_value { type: DT_QINT32 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "strides" type: "list(int)" } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 } } } attr { name: "padding_list" type: "list(int)" default_value { list { } } } -} -- | quantizedConv2DWithBiasAndReluAndRequantize :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 tinput tfilter tbias 
out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int32, Float] tbias, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 tbias -- ^ __bias__ -> Tensor v'4 Float -- ^ __min_input__ -> Tensor v'5 Float -- ^ __max_input__ -> Tensor v'6 Float -- ^ __min_filter__ -> Tensor v'7 Float -- ^ __max_filter__ -> Tensor v'8 Float -- ^ __min_freezed_output__ -> Tensor v'9 Float -- ^ __max_freezed_output__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedConv2DWithBiasAndReluAndRequantize = quantizedConv2DWithBiasAndReluAndRequantize' id quantizedConv2DWithBiasAndReluAndRequantize' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 tinput tfilter tbias out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int32, Float] tbias, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 tbias -- ^ __bias__ -> Tensor v'4 Float -- ^ __min_input__ -> Tensor v'5 Float -- ^ __max_input__ -> Tensor v'6 Float -- ^ __min_filter__ -> Tensor v'7 Float -- ^ __max_filter__ -> Tensor v'8 Float -- ^ __min_freezed_output__ -> Tensor v'9 Float -- ^ __max_freezed_output__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedConv2DWithBiasAndReluAndRequantize' op'options input filter bias min_input max_input min_filter max_filter min_freezed_output max_freezed_output | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs bias, buildInputs min_input, buildInputs max_input, buildInputs min_filter, buildInputs max_filter, buildInputs min_freezed_output, buildInputs max_freezed_output] return (opDef "QuantizedConv2DWithBiasAndReluAndRequantize" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "Tfilter" .~ tensorType (undefined :: tfilter) & opAttr "Tbias" .~ tensorType (undefined :: tbias) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "filter" type_attr: "Tfilter" } input_arg { name: "bias" type_attr: "Tbias" } input_arg { name: "min_input" type: DT_FLOAT } input_arg { name: "max_input" type: DT_FLOAT } input_arg { name: "min_filter" type: DT_FLOAT } input_arg { name: "max_filter" type: DT_FLOAT } input_arg { name: "min_freezed_output" type: DT_FLOAT } input_arg { name: "max_freezed_output" type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "min_output" type: DT_FLOAT } output_arg { name: "max_output" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "Tfilter" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 
type: DT_QUINT16 } } } attr { name: "Tbias" type: "type" allowed_values { list { type: DT_FLOAT type: DT_QINT32 } } } attr { name: "out_type" type: "type" default_value { type: DT_QUINT8 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "strides" type: "list(int)" } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 } } } attr { name: "padding_list" type: "list(int)" default_value { list { } } } -} -- | quantizedConv2DWithBiasAndRequantize :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 tinput tfilter tbias out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int32, Float] tbias, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 tbias -- ^ __bias__ -> Tensor v'4 Float -- ^ __min_input__ -> Tensor v'5 Float -- ^ __max_input__ -> Tensor v'6 Float -- ^ __min_filter__ -> Tensor v'7 Float -- ^ __max_filter__ -> Tensor v'8 Float -- ^ __min_freezed_output__ -> Tensor v'9 Float -- ^ __max_freezed_output__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedConv2DWithBiasAndRequantize = quantizedConv2DWithBiasAndRequantize' id quantizedConv2DWithBiasAndRequantize' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 tinput tfilter tbias out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int32, Float] tbias, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 tbias -- ^ __bias__ -> Tensor v'4 Float -- ^ __min_input__ -> Tensor v'5 Float -- ^ __max_input__ -> Tensor v'6 Float -- ^ __min_filter__ -> Tensor v'7 Float -- ^ __max_filter__ -> Tensor v'8 Float -- ^ __min_freezed_output__ -> Tensor v'9 Float -- ^ __max_freezed_output__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedConv2DWithBiasAndRequantize' op'options input filter bias min_input max_input min_filter max_filter min_freezed_output max_freezed_output | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs bias, buildInputs min_input, buildInputs max_input, buildInputs min_filter, buildInputs max_filter, buildInputs min_freezed_output, buildInputs max_freezed_output] return (opDef "QuantizedConv2DWithBiasAndRequantize" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "Tfilter" .~ tensorType (undefined :: tfilter) & opAttr "Tbias" .~ tensorType (undefined :: tbias) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "filter" type_attr: "Tfilter" } input_arg { name: "bias" type_attr: "Tbias" } input_arg { name: "min_input" type: DT_FLOAT } 
input_arg { name: "max_input" type: DT_FLOAT } input_arg { name: "min_filter" type: DT_FLOAT } input_arg { name: "max_filter" type: DT_FLOAT } input_arg { name: "min_freezed_output" type: DT_FLOAT } input_arg { name: "max_freezed_output" type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "min_output" type: DT_FLOAT } output_arg { name: "max_output" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "Tfilter" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "Tbias" type: "type" allowed_values { list { type: DT_FLOAT type: DT_QINT32 } } } attr { name: "out_type" type: "type" default_value { type: DT_QINT8 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "strides" type: "list(int)" } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 } } } attr { name: "padding_list" type: "list(int)" default_value { list { } } } -} -- | quantizedConv2DWithBiasSignedSumAndReluAndRequantize :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 v'11 v'12 tinput tfilter tbias tsummand out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int32, Float] tbias, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tsummand, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 tbias -- ^ __bias__ -> Tensor v'4 Float -- ^ __min_input__ -> Tensor v'5 Float -- ^ __max_input__ -> Tensor v'6 Float -- ^ __min_filter__ -> Tensor v'7 Float -- ^ __max_filter__ -> Tensor v'8 Float -- ^ __min_freezed_output__ -> Tensor v'9 Float -- ^ __max_freezed_output__ -> Tensor v'10 tsummand -- ^ __summand__ -> Tensor v'11 Float -- ^ __min_summand__ -> Tensor v'12 Float -- ^ __max_summand__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedConv2DWithBiasSignedSumAndReluAndRequantize = quantizedConv2DWithBiasSignedSumAndReluAndRequantize' id quantizedConv2DWithBiasSignedSumAndReluAndRequantize' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 v'11 v'12 tinput tfilter tbias tsummand out_type . 
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int32, Float] tbias, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tsummand, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 tbias -- ^ __bias__ -> Tensor v'4 Float -- ^ __min_input__ -> Tensor v'5 Float -- ^ __max_input__ -> Tensor v'6 Float -- ^ __min_filter__ -> Tensor v'7 Float -- ^ __max_filter__ -> Tensor v'8 Float -- ^ __min_freezed_output__ -> Tensor v'9 Float -- ^ __max_freezed_output__ -> Tensor v'10 tsummand -- ^ __summand__ -> Tensor v'11 Float -- ^ __min_summand__ -> Tensor v'12 Float -- ^ __max_summand__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedConv2DWithBiasSignedSumAndReluAndRequantize' op'options input filter bias min_input max_input min_filter max_filter min_freezed_output max_freezed_output summand min_summand max_summand | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs bias, buildInputs min_input, buildInputs max_input, buildInputs min_filter, buildInputs max_filter, buildInputs min_freezed_output, buildInputs max_freezed_output, buildInputs summand, buildInputs min_summand, buildInputs max_summand] return (opDef "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "Tfilter" .~ tensorType (undefined :: tfilter) & opAttr "Tbias" .~ tensorType (undefined :: tbias) & opAttr "Tsummand" .~ tensorType (undefined :: tsummand) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "filter" type_attr: "Tfilter" } input_arg { name: "bias" type_attr: "Tbias" } input_arg { name: "min_input" type: DT_FLOAT } input_arg { name: "max_input" type: DT_FLOAT } input_arg { name: "min_filter" type: DT_FLOAT } input_arg { name: "max_filter" type: DT_FLOAT } input_arg { name: "min_freezed_output" type: DT_FLOAT } input_arg { name: "max_freezed_output" type: DT_FLOAT } input_arg { name: "summand" type_attr: "Tsummand" } input_arg { name: "min_summand" type: DT_FLOAT } input_arg { name: "max_summand" type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "min_output" type: DT_FLOAT } output_arg { name: "max_output" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "Tfilter" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "Tbias" type: "type" allowed_values { list { type: DT_FLOAT type: DT_QINT32 } } } attr { name: "Tsummand" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "out_type" type: "type" default_value { type: DT_QUINT8 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "strides" type: "list(int)" } attr { name: "padding" type: 
"string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 } } } attr { name: "padding_list" type: "list(int)" default_value { list { } } } -} -- | quantizedConv2DWithBiasSumAndRelu :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 tinput tfilter out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 Float -- ^ __bias__ -> Tensor v'4 Float -- ^ __min_input__ -> Tensor v'5 Float -- ^ __max_input__ -> Tensor v'6 Float -- ^ __min_filter__ -> Tensor v'7 Float -- ^ __max_filter__ -> Tensor v'8 Float -- ^ __summand__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedConv2DWithBiasSumAndRelu = quantizedConv2DWithBiasSumAndRelu' id quantizedConv2DWithBiasSumAndRelu' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 tinput tfilter out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 Float -- ^ __bias__ -> Tensor v'4 Float -- ^ __min_input__ -> Tensor v'5 Float -- ^ __max_input__ -> Tensor v'6 Float -- ^ __min_filter__ -> Tensor v'7 Float -- ^ __max_filter__ -> Tensor v'8 Float -- ^ __summand__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedConv2DWithBiasSumAndRelu' op'options input filter bias min_input max_input min_filter max_filter summand | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs bias, buildInputs min_input, buildInputs max_input, buildInputs min_filter, buildInputs max_filter, buildInputs summand] return (opDef "QuantizedConv2DWithBiasSumAndRelu" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "Tfilter" .~ tensorType (undefined :: tfilter) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "filter" type_attr: "Tfilter" } input_arg { name: "bias" type: DT_FLOAT } input_arg { name: "min_input" type: DT_FLOAT } input_arg { name: "max_input" type: DT_FLOAT } input_arg { name: "min_filter" type: DT_FLOAT } input_arg { name: "max_filter" type: DT_FLOAT } input_arg { name: "summand" type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "min_output" type: DT_FLOAT } output_arg { name: "max_output" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "Tfilter" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "out_type" type: "type" default_value { type: DT_QINT32 } allowed_values { list { type: 
DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "strides" type: "list(int)" } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 } } } attr { name: "padding_list" type: "list(int)" default_value { list { } } } -} -- | quantizedConv2DWithBiasSumAndReluAndRequantize :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 v'11 v'12 tinput tfilter tbias tsummand out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int32, Float] tbias, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tsummand, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 tbias -- ^ __bias__ -> Tensor v'4 Float -- ^ __min_input__ -> Tensor v'5 Float -- ^ __max_input__ -> Tensor v'6 Float -- ^ __min_filter__ -> Tensor v'7 Float -- ^ __max_filter__ -> Tensor v'8 Float -- ^ __min_freezed_output__ -> Tensor v'9 Float -- ^ __max_freezed_output__ -> Tensor v'10 tsummand -- ^ __summand__ -> Tensor v'11 Float -- ^ __min_summand__ -> Tensor v'12 Float -- ^ __max_summand__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedConv2DWithBiasSumAndReluAndRequantize = quantizedConv2DWithBiasSumAndReluAndRequantize' id quantizedConv2DWithBiasSumAndReluAndRequantize' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 v'11 v'12 tinput tfilter tbias tsummand out_type . 
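{- Note (hand-written): the Sum variants fuse a residual addition into the
   convolution, so the summand argument carries the other branch of a skip
   connection. In the requantizing forms above and below, the summand is
   itself quantized and travels with its own min_summand/max_summand range:

     (y, lo, hi) =
         quantizedConv2DWithBiasSumAndReluAndRequantize' convParams
             inp filt bias minIn maxIn minFilt maxFilt outLo outHi
             skip skipLo skipHi
-}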
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int32, Float] tbias, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tsummand, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 tbias -- ^ __bias__ -> Tensor v'4 Float -- ^ __min_input__ -> Tensor v'5 Float -- ^ __max_input__ -> Tensor v'6 Float -- ^ __min_filter__ -> Tensor v'7 Float -- ^ __max_filter__ -> Tensor v'8 Float -- ^ __min_freezed_output__ -> Tensor v'9 Float -- ^ __max_freezed_output__ -> Tensor v'10 tsummand -- ^ __summand__ -> Tensor v'11 Float -- ^ __min_summand__ -> Tensor v'12 Float -- ^ __max_summand__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedConv2DWithBiasSumAndReluAndRequantize' op'options input filter bias min_input max_input min_filter max_filter min_freezed_output max_freezed_output summand min_summand max_summand | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs bias, buildInputs min_input, buildInputs max_input, buildInputs min_filter, buildInputs max_filter, buildInputs min_freezed_output, buildInputs max_freezed_output, buildInputs summand, buildInputs min_summand, buildInputs max_summand] return (opDef "QuantizedConv2DWithBiasSumAndReluAndRequantize" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "Tfilter" .~ tensorType (undefined :: tfilter) & opAttr "Tbias" .~ tensorType (undefined :: tbias) & opAttr "Tsummand" .~ tensorType (undefined :: tsummand) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "filter" type_attr: "Tfilter" } input_arg { name: "bias" type_attr: "Tbias" } input_arg { name: "min_input" type: DT_FLOAT } input_arg { name: "max_input" type: DT_FLOAT } input_arg { name: "min_filter" type: DT_FLOAT } input_arg { name: "max_filter" type: DT_FLOAT } input_arg { name: "min_freezed_output" type: DT_FLOAT } input_arg { name: "max_freezed_output" type: DT_FLOAT } input_arg { name: "summand" type_attr: "Tsummand" } input_arg { name: "min_summand" type: DT_FLOAT } input_arg { name: "max_summand" type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "min_output" type: DT_FLOAT } output_arg { name: "max_output" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "Tfilter" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "Tbias" type: "type" allowed_values { list { type: DT_FLOAT type: DT_QINT32 } } } attr { name: "Tsummand" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "out_type" type: "type" default_value { type: DT_QUINT8 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "strides" type: "list(int)" } attr { name: "padding" type: "string" 
allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 } } } attr { name: "padding_list" type: "list(int)" default_value { list { } } } -} -- | quantizedDepthwiseConv2D :: forall v'1 v'2 v'3 v'4 v'5 v'6 tinput tfilter out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 Float -- ^ __min_input__ -> Tensor v'4 Float -- ^ __max_input__ -> Tensor v'5 Float -- ^ __min_filter__ -> Tensor v'6 Float -- ^ __max_filter__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedDepthwiseConv2D = quantizedDepthwiseConv2D' id quantizedDepthwiseConv2D' :: forall v'1 v'2 v'3 v'4 v'5 v'6 tinput tfilter out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 Float -- ^ __min_input__ -> Tensor v'4 Float -- ^ __max_input__ -> Tensor v'5 Float -- ^ __min_filter__ -> Tensor v'6 Float -- ^ __max_filter__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedDepthwiseConv2D' op'options input filter min_input max_input min_filter max_filter | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs min_input, buildInputs max_input, buildInputs min_filter, buildInputs max_filter] return (opDef "QuantizedDepthwiseConv2D" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "Tfilter" .~ tensorType (undefined :: tfilter) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "filter" type_attr: "Tfilter" } input_arg { name: "min_input" type: DT_FLOAT } input_arg { name: "max_input" type: DT_FLOAT } input_arg { name: "min_filter" type: DT_FLOAT } input_arg { name: "max_filter" type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "min_output" type: DT_FLOAT } output_arg { name: "max_output" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "Tfilter" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "out_type" type: "type" default_value { type: DT_QINT32 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "strides" type: "list(int)" } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 } } } -} -- | quantizedDepthwiseConv2DWithBias :: forall v'1 
v'2 v'3 v'4 v'5 v'6 v'7 tinput tfilter out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 Float -- ^ __bias__ -> Tensor v'4 Float -- ^ __min_input__ -> Tensor v'5 Float -- ^ __max_input__ -> Tensor v'6 Float -- ^ __min_filter__ -> Tensor v'7 Float -- ^ __max_filter__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedDepthwiseConv2DWithBias = quantizedDepthwiseConv2DWithBias' id quantizedDepthwiseConv2DWithBias' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 tinput tfilter out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 Float -- ^ __bias__ -> Tensor v'4 Float -- ^ __min_input__ -> Tensor v'5 Float -- ^ __max_input__ -> Tensor v'6 Float -- ^ __min_filter__ -> Tensor v'7 Float -- ^ __max_filter__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedDepthwiseConv2DWithBias' op'options input filter bias min_input max_input min_filter max_filter | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs bias, buildInputs min_input, buildInputs max_input, buildInputs min_filter, buildInputs max_filter] return (opDef "QuantizedDepthwiseConv2DWithBias" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "Tfilter" .~ tensorType (undefined :: tfilter) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "filter" type_attr: "Tfilter" } input_arg { name: "bias" type: DT_FLOAT } input_arg { name: "min_input" type: DT_FLOAT } input_arg { name: "max_input" type: DT_FLOAT } input_arg { name: "min_filter" type: DT_FLOAT } input_arg { name: "max_filter" type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "min_output" type: DT_FLOAT } output_arg { name: "max_output" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "Tfilter" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "out_type" type: "type" default_value { type: DT_QINT32 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "strides" type: "list(int)" } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 } } } -} -- | quantizedDepthwiseConv2DWithBiasAndRelu :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 tinput tfilter out_type . 
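{- Usage sketch (hand-written) for the depthwise variants: they take the same
   attrs as the regular quantized convolutions; e.g. a dilated depthwise
   convolution sets the "dilations" list attr (default [1,1,1,1] per the
   OpDef above) alongside the required "strides" and "padding":

     dw = quantizedDepthwiseConv2D'
              ( (opAttr "strides"   .~ ([1, 1, 1, 1] :: [Data.Int.Int64]))
              . (opAttr "padding"   .~ ("SAME" :: Data.ByteString.ByteString))
              . (opAttr "dilations" .~ ([1, 2, 2, 1] :: [Data.Int.Int64])) )
              inp filt minIn maxIn minFilt maxFilt
-}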
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 Float -- ^ __bias__ -> Tensor v'4 Float -- ^ __min_input__ -> Tensor v'5 Float -- ^ __max_input__ -> Tensor v'6 Float -- ^ __min_filter__ -> Tensor v'7 Float -- ^ __max_filter__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedDepthwiseConv2DWithBiasAndRelu = quantizedDepthwiseConv2DWithBiasAndRelu' id quantizedDepthwiseConv2DWithBiasAndRelu' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 tinput tfilter out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 Float -- ^ __bias__ -> Tensor v'4 Float -- ^ __min_input__ -> Tensor v'5 Float -- ^ __max_input__ -> Tensor v'6 Float -- ^ __min_filter__ -> Tensor v'7 Float -- ^ __max_filter__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedDepthwiseConv2DWithBiasAndRelu' op'options input filter bias min_input max_input min_filter max_filter | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs bias, buildInputs min_input, buildInputs max_input, buildInputs min_filter, buildInputs max_filter] return (opDef "QuantizedDepthwiseConv2DWithBiasAndRelu" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "Tfilter" .~ tensorType (undefined :: tfilter) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "filter" type_attr: "Tfilter" } input_arg { name: "bias" type: DT_FLOAT } input_arg { name: "min_input" type: DT_FLOAT } input_arg { name: "max_input" type: DT_FLOAT } input_arg { name: "min_filter" type: DT_FLOAT } input_arg { name: "max_filter" type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "min_output" type: DT_FLOAT } output_arg { name: "max_output" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "Tfilter" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "out_type" type: "type" default_value { type: DT_QINT32 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "strides" type: "list(int)" } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 } } } -} -- | quantizedDepthwiseConv2DWithBiasAndReluAndRequantize :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 tinput tfilter tbias out_type . 
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int32, Float] tbias, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 tbias -- ^ __bias__ -> Tensor v'4 Float -- ^ __min_input__ -> Tensor v'5 Float -- ^ __max_input__ -> Tensor v'6 Float -- ^ __min_filter__ -> Tensor v'7 Float -- ^ __max_filter__ -> Tensor v'8 Float -- ^ __min_freezed_output__ -> Tensor v'9 Float -- ^ __max_freezed_output__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedDepthwiseConv2DWithBiasAndReluAndRequantize = quantizedDepthwiseConv2DWithBiasAndReluAndRequantize' id quantizedDepthwiseConv2DWithBiasAndReluAndRequantize' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 tinput tfilter tbias out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tfilter, OneOf '[Data.Int.Int32, Float] tbias, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 tfilter -- ^ __filter__ -> Tensor v'3 tbias -- ^ __bias__ -> Tensor v'4 Float -- ^ __min_input__ -> Tensor v'5 Float -- ^ __max_input__ -> Tensor v'6 Float -- ^ __min_filter__ -> Tensor v'7 Float -- ^ __max_filter__ -> Tensor v'8 Float -- ^ __min_freezed_output__ -> Tensor v'9 Float -- ^ __max_freezed_output__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedDepthwiseConv2DWithBiasAndReluAndRequantize' op'options input filter bias min_input max_input min_filter max_filter min_freezed_output max_freezed_output | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs bias, buildInputs min_input, buildInputs max_input, buildInputs min_filter, buildInputs max_filter, buildInputs min_freezed_output, buildInputs max_freezed_output] return (opDef "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "Tfilter" .~ tensorType (undefined :: tfilter) & opAttr "Tbias" .~ tensorType (undefined :: tbias) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "filter" type_attr: "Tfilter" } input_arg { name: "bias" type_attr: "Tbias" } input_arg { name: "min_input" type: DT_FLOAT } input_arg { name: "max_input" type: DT_FLOAT } input_arg { name: "min_filter" type: DT_FLOAT } input_arg { name: "max_filter" type: DT_FLOAT } input_arg { name: "min_freezed_output" type: DT_FLOAT } input_arg { name: "max_freezed_output" type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "min_output" type: DT_FLOAT } output_arg { name: "max_output" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "Tfilter" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 
type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "Tbias" type: "type" allowed_values { list { type: DT_FLOAT type: DT_QINT32 } } } attr { name: "out_type" type: "type" default_value { type: DT_QUINT8 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "strides" type: "list(int)" } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 } } } -} -- | quantizedInstanceNorm :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 Float -- ^ __x_min__ -> Tensor v'3 Float -- ^ __x_max__ -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__y__, __y_min__, __y_max__) -- -- * __y__ -- -- * __y_min__ -- -- * __y_max__ quantizedInstanceNorm = quantizedInstanceNorm' id quantizedInstanceNorm' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 Float -- ^ __x_min__ -> Tensor v'3 Float -- ^ __x_max__ -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__y__, __y_min__, __y_max__) -- -- * __y__ -- -- * __y_min__ -- -- * __y_max__ quantizedInstanceNorm' op'options x x_min x_max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs x_min, buildInputs x_max] return (opDef "QuantizedInstanceNorm" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "x_min" type: DT_FLOAT } input_arg { name: "x_max" type: DT_FLOAT } output_arg { name: "y" type_attr: "T" } output_arg { name: "y_min" type: DT_FLOAT } output_arg { name: "y_max" type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "output_range_given" type: "bool" default_value { b: false } } attr { name: "given_y_min" type: "float" default_value { f: 0.0 } } attr { name: "given_y_max" type: "float" default_value { f: 0.0 } } attr { name: "variance_epsilon" type: "float" default_value { f: 1.0e-5 } } attr { name: "min_separation" type: "float" default_value { f: 1.0e-3 } } -} -- | quantizedMatMul :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2 toutput . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t1, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t2, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] toutput) => Tensor v'1 t1 -- ^ __a__ -> Tensor v'2 t2 -- ^ __b__ -> Tensor v'3 Float -- ^ __min_a__ -> Tensor v'4 Float -- ^ __max_a__ -> Tensor v'5 Float -- ^ __min_b__ -> Tensor v'6 Float -- ^ __max_b__ -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float) -- ^ (__out__, __min_out__, __max_out__) -- -- * __out__ -- -- * __min_out__ -- -- * __max_out__ quantizedMatMul = quantizedMatMul' id quantizedMatMul' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2 toutput . 
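{- Note (hand-written): as with the float MatMul, transposition is controlled
   by the "transpose_a" / "transpose_b" bool attrs (both default to false per
   the OpDef that follows), set through OpParams:

     (c, minC, maxC) =
         quantizedMatMul' (opAttr "transpose_b" .~ True)
             a b minA maxA minB maxB
-}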
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t1, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t2, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] toutput) => OpParams -> Tensor v'1 t1 -- ^ __a__ -> Tensor v'2 t2 -- ^ __b__ -> Tensor v'3 Float -- ^ __min_a__ -> Tensor v'4 Float -- ^ __max_a__ -> Tensor v'5 Float -- ^ __min_b__ -> Tensor v'6 Float -- ^ __max_b__ -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float) -- ^ (__out__, __min_out__, __max_out__) -- -- * __out__ -- -- * __min_out__ -- -- * __max_out__ quantizedMatMul' op'options a b min_a max_a min_b max_b | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a, buildInputs b, buildInputs min_a, buildInputs max_a, buildInputs min_b, buildInputs max_b] return (opDef "QuantizedMatMul" & opAttr "T1" .~ tensorType (undefined :: t1) & opAttr "T2" .~ tensorType (undefined :: t2) & opAttr "Toutput" .~ tensorType (undefined :: toutput) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a" type_attr: "T1" } input_arg { name: "b" type_attr: "T2" } input_arg { name: "min_a" type: DT_FLOAT } input_arg { name: "max_a" type: DT_FLOAT } input_arg { name: "min_b" type: DT_FLOAT } input_arg { name: "max_b" type: DT_FLOAT } output_arg { name: "out" type_attr: "Toutput" } output_arg { name: "min_out" type: DT_FLOAT } output_arg { name: "max_out" type: DT_FLOAT } attr { name: "T1" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "T2" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "Toutput" type: "type" default_value { type: DT_QINT32 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "transpose_a" type: "bool" default_value { b: false } } attr { name: "transpose_b" type: "bool" default_value { b: false } } attr { name: "Tactivation" type: "type" default_value { type: DT_QUINT8 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } -} -- | quantizedMaxPool :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 Float -- ^ __min_input__ -> Tensor v'3 Float -- ^ __max_input__ -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedMaxPool = quantizedMaxPool' id quantizedMaxPool' :: forall v'1 v'2 v'3 t . 
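{- Usage sketch (hand-written): "ksize", "strides" and "padding" are all
   required attrs of QuantizedMaxPool with no dedicated argument; e.g. 2x2
   pooling with stride 2:

     (pooled, lo, hi) =
         quantizedMaxPool'
             ( (opAttr "ksize"   .~ ([1, 2, 2, 1] :: [Data.Int.Int64]))
             . (opAttr "strides" .~ ([1, 2, 2, 1] :: [Data.Int.Int64]))
             . (opAttr "padding" .~ ("VALID" :: Data.ByteString.ByteString)) )
             inp minIn maxIn
-}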
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 Float -- ^ __min_input__ -> Tensor v'3 Float -- ^ __max_input__ -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __min_output__, __max_output__) -- -- * __output__ -- -- * __min_output__ -- -- * __max_output__ quantizedMaxPool' op'options input min_input max_input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs min_input, buildInputs max_input] return (opDef "QuantizedMaxPool" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "min_input" type: DT_FLOAT } input_arg { name: "max_input" type: DT_FLOAT } output_arg { name: "output" type_attr: "T" } output_arg { name: "min_output" type: DT_FLOAT } output_arg { name: "max_output" type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "ksize" type: "list(int)" } attr { name: "strides" type: "list(int)" } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" } } } -} -- | quantizedMul :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2 toutput . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t1, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t2, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] toutput) => Tensor v'1 t1 -- ^ __x__ -> Tensor v'2 t2 -- ^ __y__ -> Tensor v'3 Float -- ^ __min_x__ -> Tensor v'4 Float -- ^ __max_x__ -> Tensor v'5 Float -- ^ __min_y__ -> Tensor v'6 Float -- ^ __max_y__ -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float) -- ^ (__z__, __min_z__, __max_z__) -- -- * __z__ -- -- * __min_z__ -- -- * __max_z__ quantizedMul = quantizedMul' id quantizedMul' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t1 t2 toutput . 
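{- Note (hand-written): QuantizedMul allows the two factors to have different
   quantized element types (T1, T2) and widens to Toutput, whose OpDef
   default is the 32-bit quantized type; as elsewhere, the result annotation
   picks it:

     (z, minZ, maxZ) =
         quantizedMul x y minX maxX minY maxY
           :: (Tensor Build Data.Int.Int32, Tensor Build Float, Tensor Build Float)
-}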
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t1, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t2, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] toutput) => OpParams -> Tensor v'1 t1 -- ^ __x__ -> Tensor v'2 t2 -- ^ __y__ -> Tensor v'3 Float -- ^ __min_x__ -> Tensor v'4 Float -- ^ __max_x__ -> Tensor v'5 Float -- ^ __min_y__ -> Tensor v'6 Float -- ^ __max_y__ -> (Tensor Build toutput, Tensor Build Float, Tensor Build Float) -- ^ (__z__, __min_z__, __max_z__) -- -- * __z__ -- -- * __min_z__ -- -- * __max_z__ quantizedMul' op'options x y min_x max_x min_y max_y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y, buildInputs min_x, buildInputs max_x, buildInputs min_y, buildInputs max_y] return (opDef "QuantizedMul" & opAttr "T1" .~ tensorType (undefined :: t1) & opAttr "T2" .~ tensorType (undefined :: t2) & opAttr "Toutput" .~ tensorType (undefined :: toutput) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T1" } input_arg { name: "y" type_attr: "T2" } input_arg { name: "min_x" type: DT_FLOAT } input_arg { name: "max_x" type: DT_FLOAT } input_arg { name: "min_y" type: DT_FLOAT } input_arg { name: "max_y" type: DT_FLOAT } output_arg { name: "z" type_attr: "Toutput" } output_arg { name: "min_z" type: DT_FLOAT } output_arg { name: "max_z" type: DT_FLOAT } attr { name: "T1" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "T2" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "Toutput" type: "type" default_value { type: DT_QINT32 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } -} -- | quantizedRelu :: forall v'1 v'2 v'3 tinput out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __features__ -> Tensor v'2 Float -- ^ __min_features__ -> Tensor v'3 Float -- ^ __max_features__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__activations__, __min_activations__, __max_activations__) -- -- * __activations__ -- -- * __min_activations__ -- -- * __max_activations__ quantizedRelu = quantizedRelu' id quantizedRelu' :: forall v'1 v'2 v'3 tinput out_type . 
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __features__ -> Tensor v'2 Float -- ^ __min_features__ -> Tensor v'3 Float -- ^ __max_features__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__activations__, __min_activations__, __max_activations__) -- -- * __activations__ -- -- * __min_activations__ -- -- * __max_activations__ quantizedRelu' op'options features min_features max_features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features, buildInputs min_features, buildInputs max_features] return (opDef "QuantizedRelu" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "features" type_attr: "Tinput" } input_arg { name: "min_features" type: DT_FLOAT } input_arg { name: "max_features" type: DT_FLOAT } output_arg { name: "activations" type_attr: "out_type" } output_arg { name: "min_activations" type: DT_FLOAT } output_arg { name: "max_activations" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "out_type" type: "type" default_value { type: DT_QUINT8 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } -} -- | quantizedRelu6 :: forall v'1 v'2 v'3 tinput out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __features__ -> Tensor v'2 Float -- ^ __min_features__ -> Tensor v'3 Float -- ^ __max_features__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__activations__, __min_activations__, __max_activations__) -- -- * __activations__ -- -- * __min_activations__ -- -- * __max_activations__ quantizedRelu6 = quantizedRelu6' id quantizedRelu6' :: forall v'1 v'2 v'3 tinput out_type . 
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __features__ -> Tensor v'2 Float -- ^ __min_features__ -> Tensor v'3 Float -- ^ __max_features__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__activations__, __min_activations__, __max_activations__) -- -- * __activations__ -- -- * __min_activations__ -- -- * __max_activations__ quantizedRelu6' op'options features min_features max_features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features, buildInputs min_features, buildInputs max_features] return (opDef "QuantizedRelu6" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "features" type_attr: "Tinput" } input_arg { name: "min_features" type: DT_FLOAT } input_arg { name: "max_features" type: DT_FLOAT } output_arg { name: "activations" type_attr: "out_type" } output_arg { name: "min_activations" type: DT_FLOAT } output_arg { name: "max_activations" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "out_type" type: "type" default_value { type: DT_QUINT8 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } -} -- | quantizedReluX :: forall v'1 v'2 v'3 v'4 tinput out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __features__ -> Tensor v'2 Float -- ^ __max_value__ -> Tensor v'3 Float -- ^ __min_features__ -> Tensor v'4 Float -- ^ __max_features__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__activations__, __min_activations__, __max_activations__) -- -- * __activations__ -- -- * __min_activations__ -- -- * __max_activations__ quantizedReluX = quantizedReluX' id quantizedReluX' :: forall v'1 v'2 v'3 v'4 tinput out_type . 
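{- Note (hand-written): QuantizedReluX generalizes Relu6 by clamping
   activations to [0, max_value], with max_value passed as the second
   argument; e.g. relu6 expressed through reluX, again assuming
   TensorFlow.Ops.scalar imported as TF:

     (act, lo, hi) = quantizedReluX features (TF.scalar 6) minF maxF
-}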
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __features__ -> Tensor v'2 Float -- ^ __max_value__ -> Tensor v'3 Float -- ^ __min_features__ -> Tensor v'4 Float -- ^ __max_features__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__activations__, __min_activations__, __max_activations__) -- -- * __activations__ -- -- * __min_activations__ -- -- * __max_activations__ quantizedReluX' op'options features max_value min_features max_features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features, buildInputs max_value, buildInputs min_features, buildInputs max_features] return (opDef "QuantizedReluX" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "features" type_attr: "Tinput" } input_arg { name: "max_value" type: DT_FLOAT } input_arg { name: "min_features" type: DT_FLOAT } input_arg { name: "max_features" type: DT_FLOAT } output_arg { name: "activations" type_attr: "out_type" } output_arg { name: "min_activations" type: DT_FLOAT } output_arg { name: "max_activations" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "out_type" type: "type" default_value { type: DT_QUINT8 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } -} -- | quantizedReshape :: forall v'1 v'2 v'3 v'4 t tshape . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tshape) => Tensor v'1 t -- ^ __tensor__ -> Tensor v'2 tshape -- ^ __shape__ -> Tensor v'3 Float -- ^ __input_min__ -> Tensor v'4 Float -- ^ __input_max__ -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __output_min__, __output_max__) -- -- * __output__ -- -- * __output_min__ -- -- * __output_max__ quantizedReshape = quantizedReshape' id quantizedReshape' :: forall v'1 v'2 v'3 v'4 t tshape . 
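{- quantizedRelu6 and quantizedReluX (above) differ from quantizedRelu only in
   clamping: Relu6 caps activations at 6, ReluX at an explicit max_value input.
   A minimal sketch, again assuming `scalar` from TensorFlow.Ops and a
   quantized `features` tensor as in the previous example:

     (acts, minA, maxA) =
       quantizedReluX features (scalar (3.5 :: Float))
                      (scalar (0 :: Float)) (scalar (6 :: Float))
-}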
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tshape) => OpParams -> Tensor v'1 t -- ^ __tensor__ -> Tensor v'2 tshape -- ^ __shape__ -> Tensor v'3 Float -- ^ __input_min__ -> Tensor v'4 Float -- ^ __input_max__ -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __output_min__, __output_max__) -- -- * __output__ -- -- * __output_min__ -- -- * __output_max__ quantizedReshape' op'options tensor shape input_min input_max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor, buildInputs shape, buildInputs input_min, buildInputs input_max] return (opDef "QuantizedReshape" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tshape" .~ tensorType (undefined :: tshape) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" type_attr: "T" } input_arg { name: "shape" type_attr: "Tshape" } input_arg { name: "input_min" type: DT_FLOAT } input_arg { name: "input_max" type: DT_FLOAT } output_arg { name: "output" type_attr: "T" } output_arg { name: "output_min" type: DT_FLOAT } output_arg { name: "output_max" type: DT_FLOAT } attr { name: "T" type: "type" } attr { name: "Tshape" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | quantizedResizeBilinear :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int32, Data.Word.Word8, Float] t) => Tensor v'1 t -- ^ __images__ -> Tensor v'2 Data.Int.Int32 -- ^ __size__ -> Tensor v'3 Float -- ^ __min__ -> Tensor v'4 Float -- ^ __max__ -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__resized_images__, __out_min__, __out_max__) -- -- * __resized_images__ -- -- * __out_min__ -- -- * __out_max__ quantizedResizeBilinear = quantizedResizeBilinear' id quantizedResizeBilinear' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int32, Data.Word.Word8, Float] t) => OpParams -> Tensor v'1 t -- ^ __images__ -> Tensor v'2 Data.Int.Int32 -- ^ __size__ -> Tensor v'3 Float -- ^ __min__ -> Tensor v'4 Float -- ^ __max__ -> (Tensor Build t, Tensor Build Float, Tensor Build Float) -- ^ (__resized_images__, __out_min__, __out_max__) -- -- * __resized_images__ -- -- * __out_min__ -- -- * __out_max__ quantizedResizeBilinear' op'options images size min max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images, buildInputs size, buildInputs min, buildInputs max] return (opDef "QuantizedResizeBilinear" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "images" type_attr: "T" } input_arg { name: "size" type: DT_INT32 } input_arg { name: "min" type: DT_FLOAT } input_arg { name: "max" type: DT_FLOAT } output_arg { name: "resized_images" type_attr: "T" } output_arg { name: "out_min" type: DT_FLOAT } output_arg { name: "out_max" type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_QUINT8 type: DT_QINT32 type: DT_FLOAT } } } attr { name: "align_corners" type: "bool" default_value { b: false } } attr { name: "half_pixel_centers" type: "bool" default_value { b: false } } -} -- | queueClose :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> m' (ControlNode) queueClose = queueClose' id queueClose' :: forall m' . 
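{- quantizedReshape (above) behaves like Reshape on the raw quantized bytes and
   passes the input range through unchanged as output_min/output_max. A sketch,
   assuming a quantized tensor `qt` with range inputs `lo` and `hi` built
   elsewhere:

     (out, outMin, outMax) =
       quantizedReshape qt (vector [2, 3 :: Data.Int.Int32]) lo hi
-}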
(MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> m' (ControlNode) queueClose' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "QueueClose" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } attr { name: "cancel_pending_enqueues" type: "bool" default_value { b: false } } -} -- | queueCloseV2 :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __handle__ -> m' (ControlNode) queueCloseV2 = queueCloseV2' id queueCloseV2' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__ -> m' (ControlNode) queueCloseV2' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "QueueCloseV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_RESOURCE } attr { name: "cancel_pending_enqueues" type: "bool" default_value { b: false } } -} -- | queueDequeue :: forall component_types m' . (MonadBuild m', TensorTypes component_types) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> m' (TensorList (Value) component_types) -- ^ __components__ queueDequeue = queueDequeue' id queueDequeue' :: forall component_types m' . (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> m' (TensorList (Value) component_types) -- ^ __components__ queueDequeue' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "QueueDequeue" & opAttr "component_types" .~ fromTensorTypes (Proxy :: Proxy component_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } output_arg { name: "components" type_list_attr: "component_types" } attr { name: "component_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } } -} -- | queueDequeueMany :: forall v'2 component_types m' . (MonadBuild m', TensorTypes component_types) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __n__ -> m' (TensorList (Value) component_types) -- ^ __components__ queueDequeueMany = queueDequeueMany' id queueDequeueMany' :: forall v'2 component_types m' . (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __n__ -> m' (TensorList (Value) component_types) -- ^ __components__ queueDequeueMany' op'options handle n | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs n] buildOp [] (opDef "QueueDequeueMany" & opAttr "component_types" .~ fromTensorTypes (Proxy :: Proxy component_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "n" type: DT_INT32 } output_arg { name: "components" type_list_attr: "component_types" } attr { name: "component_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } } -} -- | queueDequeueManyV2 :: forall v'1 v'2 component_types m' . 
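{- The queue ops above operate on a stateful queue handle. A sketch of
   dequeuing a fixed-size batch with queueDequeueMany, where `q` is assumed to
   be a Tensor Ref ByteString handle created by one of the *Queue constructor
   ops in this module, and the component types are fixed by annotation:

     batch <- queueDequeueMany q (scalar (32 :: Data.Int.Int32))
                :: Session (TensorList Value '[Float])
-}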
(MonadBuild m', TensorTypes component_types) => Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __n__ -> m' (TensorList (Value) component_types) -- ^ __components__ queueDequeueManyV2 = queueDequeueManyV2' id queueDequeueManyV2' :: forall v'1 v'2 component_types m' . (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __n__ -> m' (TensorList (Value) component_types) -- ^ __components__ queueDequeueManyV2' op'options handle n | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs n] buildOp [] (opDef "QueueDequeueManyV2" & opAttr "component_types" .~ fromTensorTypes (Proxy :: Proxy component_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_RESOURCE } input_arg { name: "n" type: DT_INT32 } output_arg { name: "components" type_list_attr: "component_types" } attr { name: "component_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } } -} -- | queueDequeueUpTo :: forall v'2 component_types m' . (MonadBuild m', TensorTypes component_types) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __n__ -> m' (TensorList (Value) component_types) -- ^ __components__ queueDequeueUpTo = queueDequeueUpTo' id queueDequeueUpTo' :: forall v'2 component_types m' . (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __n__ -> m' (TensorList (Value) component_types) -- ^ __components__ queueDequeueUpTo' op'options handle n | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs n] buildOp [] (opDef "QueueDequeueUpTo" & opAttr "component_types" .~ fromTensorTypes (Proxy :: Proxy component_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "n" type: DT_INT32 } output_arg { name: "components" type_list_attr: "component_types" } attr { name: "component_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } } -} -- | queueDequeueUpToV2 :: forall v'1 v'2 component_types m' . (MonadBuild m', TensorTypes component_types) => Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __n__ -> m' (TensorList (Value) component_types) -- ^ __components__ queueDequeueUpToV2 = queueDequeueUpToV2' id queueDequeueUpToV2' :: forall v'1 v'2 component_types m' . 
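{- queueDequeueUpTo (above) is the partial-batch variant of queueDequeueMany:
   once the queue has been closed it returns the fewer-than-n elements that
   remain instead of failing, which makes it the natural choice for draining
   the final, smaller batch of an input pipeline. Sketch, same assumptions as
   the previous example:

     lastBatch <- queueDequeueUpTo q (scalar (32 :: Data.Int.Int32))
                    :: Session (TensorList Value '[Float])
-}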
(MonadBuild m', TensorTypes component_types) => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __n__ -> m' (TensorList (Value) component_types) -- ^ __components__ queueDequeueUpToV2' op'options handle n | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs n] buildOp [] (opDef "QueueDequeueUpToV2" & opAttr "component_types" .~ fromTensorTypes (Proxy :: Proxy component_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_RESOURCE } input_arg { name: "n" type: DT_INT32 } output_arg { name: "components" type_list_attr: "component_types" } attr { name: "component_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } } -} -- | queueDequeueV2 :: forall v'1 component_types m' . (MonadBuild m', TensorTypes component_types) => Tensor v'1 ResourceHandle -- ^ __handle__ -> m' (TensorList (Value) component_types) -- ^ __components__ queueDequeueV2 = queueDequeueV2' id queueDequeueV2' :: forall v'1 component_types m' . (MonadBuild m', TensorTypes component_types) => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__ -> m' (TensorList (Value) component_types) -- ^ __components__ queueDequeueV2' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "QueueDequeueV2" & opAttr "component_types" .~ fromTensorTypes (Proxy :: Proxy component_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_RESOURCE } output_arg { name: "components" type_list_attr: "component_types" } attr { name: "component_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } } -} -- | queueEnqueue :: forall v'2 tcomponents m' . (MonadBuild m', TensorTypes tcomponents) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> TensorList (v'2) tcomponents -- ^ __components__ -> m' (ControlNode) queueEnqueue = queueEnqueue' id queueEnqueue' :: forall v'2 tcomponents m' . (MonadBuild m', TensorTypes tcomponents) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> TensorList (v'2) tcomponents -- ^ __components__ -> m' (ControlNode) queueEnqueue' op'options handle components | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs components] buildOp [] (opDef "QueueEnqueue" & opAttr "Tcomponents" .~ fromTensorTypes (Proxy :: Proxy tcomponents) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "components" type_list_attr: "Tcomponents" } attr { name: "Tcomponents" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } } -} -- | queueEnqueueMany :: forall v'2 tcomponents m' . (MonadBuild m', TensorTypes tcomponents) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> TensorList (v'2) tcomponents -- ^ __components__ -> m' (ControlNode) queueEnqueueMany = queueEnqueueMany' id queueEnqueueMany' :: forall v'2 tcomponents m' . 
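{- queueEnqueue (above) pushes a single element whose components are given as a
   heterogeneous TensorList. A sketch assuming the ListOf constructors (:/) and
   Nil from TensorFlow.Types and a single-component Float queue `q`:

     queueEnqueue q (vector [1, 2, 3 :: Float] :/ Nil)
-}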
(MonadBuild m', TensorTypes tcomponents) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> TensorList (v'2) tcomponents -- ^ __components__ -> m' (ControlNode) queueEnqueueMany' op'options handle components | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs components] buildOp [] (opDef "QueueEnqueueMany" & opAttr "Tcomponents" .~ fromTensorTypes (Proxy :: Proxy tcomponents) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "components" type_list_attr: "Tcomponents" } attr { name: "Tcomponents" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } } -} -- | queueEnqueueManyV2 :: forall v'1 v'2 tcomponents m' . (MonadBuild m', TensorTypes tcomponents) => Tensor v'1 ResourceHandle -- ^ __handle__ -> TensorList (v'2) tcomponents -- ^ __components__ -> m' (ControlNode) queueEnqueueManyV2 = queueEnqueueManyV2' id queueEnqueueManyV2' :: forall v'1 v'2 tcomponents m' . (MonadBuild m', TensorTypes tcomponents) => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__ -> TensorList (v'2) tcomponents -- ^ __components__ -> m' (ControlNode) queueEnqueueManyV2' op'options handle components | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs components] buildOp [] (opDef "QueueEnqueueManyV2" & opAttr "Tcomponents" .~ fromTensorTypes (Proxy :: Proxy tcomponents) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_RESOURCE } input_arg { name: "components" type_list_attr: "Tcomponents" } attr { name: "Tcomponents" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } } -} -- | queueEnqueueV2 :: forall v'1 v'2 tcomponents m' . (MonadBuild m', TensorTypes tcomponents) => Tensor v'1 ResourceHandle -- ^ __handle__ -> TensorList (v'2) tcomponents -- ^ __components__ -> m' (ControlNode) queueEnqueueV2 = queueEnqueueV2' id queueEnqueueV2' :: forall v'1 v'2 tcomponents m' . (MonadBuild m', TensorTypes tcomponents) => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__ -> TensorList (v'2) tcomponents -- ^ __components__ -> m' (ControlNode) queueEnqueueV2' op'options handle components | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs components] buildOp [] (opDef "QueueEnqueueV2" & opAttr "Tcomponents" .~ fromTensorTypes (Proxy :: Proxy tcomponents) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_RESOURCE } input_arg { name: "components" type_list_attr: "Tcomponents" } attr { name: "Tcomponents" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "timeout_ms" type: "int" default_value { i: -1 } } -} -- | queueIsClosed :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> m' (Tensor Value Bool) -- ^ __is_closed__ queueIsClosed = queueIsClosed' id queueIsClosed' :: forall m' . 
(MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> m' (Tensor Value Bool) -- ^ __is_closed__ queueIsClosed' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "QueueIsClosed" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } output_arg { name: "is_closed" type: DT_BOOL } -} -- | queueIsClosedV2 :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __handle__ -> m' (Tensor Value Bool) -- ^ __is_closed__ queueIsClosedV2 = queueIsClosedV2' id queueIsClosedV2' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__ -> m' (Tensor Value Bool) -- ^ __is_closed__ queueIsClosedV2' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "QueueIsClosedV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_RESOURCE } output_arg { name: "is_closed" type: DT_BOOL } -} -- | queueSize :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> m' (Tensor Value Data.Int.Int32) -- ^ __size__ queueSize = queueSize' id queueSize' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> m' (Tensor Value Data.Int.Int32) -- ^ __size__ queueSize' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "QueueSize" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } output_arg { name: "size" type: DT_INT32 } -} -- | queueSizeV2 :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __handle__ -> m' (Tensor Value Data.Int.Int32) -- ^ __size__ queueSizeV2 = queueSizeV2' id queueSizeV2' :: forall v'1 m' . 
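{- queueIsClosed and queueSize (above) are small introspection ops; both run in
   MonadBuild and produce scalar tensors that can then be fetched. Sketch,
   assuming `run` from TensorFlow.Session:

     closed <- queueIsClosed q >>= run   -- one-element Vector Bool
     n      <- queueSize q     >>= run   -- one-element Vector Int32
-}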
(MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__ -> m' (Tensor Value Data.Int.Int32) -- ^ __size__ queueSizeV2' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "QueueSizeV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_RESOURCE } output_arg { name: "size" type: DT_INT32 } -} -- | rFFT :: Tensor v'1 Float -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ rFFT = rFFT' id rFFT' :: OpParams -> Tensor v'1 Float -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ rFFT' op'options input fft_length | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs fft_length] return (opDef "RFFT" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_FLOAT } input_arg { name: "fft_length" type: DT_INT32 } output_arg { name: "output" type: DT_COMPLEX64 } -} -- | rFFT2D :: Tensor v'1 Float -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ rFFT2D = rFFT2D' id rFFT2D' :: OpParams -> Tensor v'1 Float -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ rFFT2D' op'options input fft_length | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs fft_length] return (opDef "RFFT2D" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_FLOAT } input_arg { name: "fft_length" type: DT_INT32 } output_arg { name: "output" type: DT_COMPLEX64 } -} -- | rFFT3D :: Tensor v'1 Float -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ rFFT3D = rFFT3D' id rFFT3D' :: OpParams -> Tensor v'1 Float -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __fft_length__ -> Tensor Build (Data.Complex.Complex Float) -- ^ __output__ rFFT3D' op'options input fft_length | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs fft_length] return (opDef "RFFT3D" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_FLOAT } input_arg { name: "fft_length" type: DT_INT32 } output_arg { name: "output" type: DT_COMPLEX64 } -} -- | rGBToHSV :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __images__ -> Tensor Build t -- ^ __output__ rGBToHSV = rGBToHSV' id rGBToHSV' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __images__ -> Tensor Build t -- ^ __output__ rGBToHSV' op'options images | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images] return (opDef "RGBToHSV" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "images" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | raggedGather :: forall v'1 v'2 v'3 tvalues tindices tsplits . 
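{- rFFT, rFFT2D and rFFT3D (above) compute real-to-complex FFTs over the
   innermost 1, 2 or 3 dimensions. For a length-n input with fft_length n, the
   innermost output dimension holds n `div` 2 + 1 complex coefficients. Sketch,
   assuming `vector` from TensorFlow.Ops:

     let signal   = vector [0, 1, 0, -1 :: Float]
         spectrum = rFFT signal (vector [4 :: Data.Int.Int32])
         -- spectrum :: Tensor Build (Data.Complex.Complex Float), 3 coefficients
-}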
(TensorType tvalues, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices, OneOf '[Data.Int.Int32, Data.Int.Int64] tsplits) => Data.Int.Int64 -- ^ __OUTPUT_RAGGED_RANK__ -> [Tensor v'1 tsplits] -- ^ __params_nested_splits__ -> Tensor v'2 tvalues -- ^ __params_dense_values__ -> Tensor v'3 tindices -- ^ __indices__ -> ([Tensor Build tsplits], Tensor Build tvalues) -- ^ (__output_nested_splits__, __output_dense_values__) -- -- * __output_nested_splits__ -- -- * __output_dense_values__ raggedGather = raggedGather' id raggedGather' :: forall v'1 v'2 v'3 tvalues tindices tsplits . (TensorType tvalues, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices, OneOf '[Data.Int.Int32, Data.Int.Int64] tsplits) => OpParams -> Data.Int.Int64 -- ^ __OUTPUT_RAGGED_RANK__ -> [Tensor v'1 tsplits] -- ^ __params_nested_splits__ -> Tensor v'2 tvalues -- ^ __params_dense_values__ -> Tensor v'3 tindices -- ^ __indices__ -> ([Tensor Build tsplits], Tensor Build tvalues) -- ^ (__output_nested_splits__, __output_dense_values__) -- -- * __output_nested_splits__ -- -- * __output_dense_values__ raggedGather' op'options oUTPUT_RAGGED_RANK params_nested_splits params_dense_values indices | eqLengthGuard [("PARAMS_RAGGED_RANK", [("params_nested_splits", length params_nested_splits)])] = pureOp [oUTPUT_RAGGED_RANK] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs params_nested_splits, buildInputs params_dense_values, buildInputs indices] return (opDef "RaggedGather" & opAttr "Tvalues" .~ tensorType (undefined :: tvalues) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & opAttr "Tsplits" .~ tensorType (undefined :: tsplits) & opAttr "OUTPUT_RAGGED_RANK" .~ oUTPUT_RAGGED_RANK & opAttr "PARAMS_RAGGED_RANK" .~ pARAMS_RAGGED_RANK & op'options & opInputs .~ op'inputs) where pARAMS_RAGGED_RANK = fromIntegral (length params_nested_splits) :: Int64 {- input_arg { name: "params_nested_splits" type_attr: "Tsplits" number_attr: "PARAMS_RAGGED_RANK" } input_arg { name: "params_dense_values" type_attr: "Tvalues" } input_arg { name: "indices" type_attr: "Tindices" } output_arg { name: "output_nested_splits" type_attr: "Tsplits" number_attr: "OUTPUT_RAGGED_RANK" } output_arg { name: "output_dense_values" type_attr: "Tvalues" } attr { name: "Tvalues" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "Tsplits" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "PARAMS_RAGGED_RANK" type: "int" has_minimum: true minimum: 1 } attr { name: "OUTPUT_RAGGED_RANK" type: "int" has_minimum: true } -} -- | raggedRange :: forall v'1 v'2 v'3 t tsplits . (OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tsplits) => Tensor v'1 t -- ^ __starts__ -> Tensor v'2 t -- ^ __limits__ -> Tensor v'3 t -- ^ __deltas__ -> (Tensor Build tsplits, Tensor Build t) -- ^ (__rt_nested_splits__, __rt_dense_values__) -- -- * __rt_nested_splits__ -- -- * __rt_dense_values__ raggedRange = raggedRange' id raggedRange' :: forall v'1 v'2 v'3 t tsplits . 
(OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tsplits) => OpParams -> Tensor v'1 t -- ^ __starts__ -> Tensor v'2 t -- ^ __limits__ -> Tensor v'3 t -- ^ __deltas__ -> (Tensor Build tsplits, Tensor Build t) -- ^ (__rt_nested_splits__, __rt_dense_values__) -- -- * __rt_nested_splits__ -- -- * __rt_dense_values__ raggedRange' op'options starts limits deltas | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs starts, buildInputs limits, buildInputs deltas] return (opDef "RaggedRange" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tsplits" .~ tensorType (undefined :: tsplits) & op'options & opInputs .~ op'inputs) {- input_arg { name: "starts" type_attr: "T" } input_arg { name: "limits" type_attr: "T" } input_arg { name: "deltas" type_attr: "T" } output_arg { name: "rt_nested_splits" type_attr: "Tsplits" } output_arg { name: "rt_dense_values" type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } attr { name: "Tsplits" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | raggedTensorFromVariant :: forall v'1 tvalues tsplits . (TensorType tvalues, OneOf '[Data.Int.Int32, Data.Int.Int64] tsplits) => Data.Int.Int64 -- ^ __input_ragged_rank__ -> Data.Int.Int64 -- ^ __output_ragged_rank__ -> Tensor v'1 Variant -- ^ __encoded_ragged__ -> ([Tensor Build tsplits], Tensor Build tvalues) -- ^ (__output_nested_splits__, __output_dense_values__) -- -- * __output_nested_splits__ -- -- * __output_dense_values__ raggedTensorFromVariant = raggedTensorFromVariant' id raggedTensorFromVariant' :: forall v'1 tvalues tsplits . (TensorType tvalues, OneOf '[Data.Int.Int32, Data.Int.Int64] tsplits) => OpParams -> Data.Int.Int64 -- ^ __input_ragged_rank__ -> Data.Int.Int64 -- ^ __output_ragged_rank__ -> Tensor v'1 Variant -- ^ __encoded_ragged__ -> ([Tensor Build tsplits], Tensor Build tvalues) -- ^ (__output_nested_splits__, __output_dense_values__) -- -- * __output_nested_splits__ -- -- * __output_dense_values__ raggedTensorFromVariant' op'options input_ragged_rank output_ragged_rank encoded_ragged | eqLengthGuard [] = pureOp [output_ragged_rank] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs encoded_ragged] return (opDef "RaggedTensorFromVariant" & opAttr "Tvalues" .~ tensorType (undefined :: tvalues) & opAttr "Tsplits" .~ tensorType (undefined :: tsplits) & opAttr "input_ragged_rank" .~ input_ragged_rank & opAttr "output_ragged_rank" .~ output_ragged_rank & op'options & opInputs .~ op'inputs) {- input_arg { name: "encoded_ragged" type: DT_VARIANT } output_arg { name: "output_nested_splits" type_attr: "Tsplits" number_attr: "output_ragged_rank" } output_arg { name: "output_dense_values" type_attr: "Tvalues" } attr { name: "input_ragged_rank" type: "int" has_minimum: true minimum: -1 } attr { name: "output_ragged_rank" type: "int" has_minimum: true minimum: 1 } attr { name: "Tvalues" type: "type" } attr { name: "Tsplits" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | raggedTensorToSparse :: forall v'1 v'2 t tsplits . 
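{- raggedRange (above) builds one range per (start, limit, delta) triple and
   returns the concatenated values plus row splits. For example, with starts
   [2,5,8], limits [3,5,12] and deltas [1,1,1] the rows are [2], [] and
   [8,9,10,11]:

     (splits, values) = raggedRange (vector [2, 5, 8 :: Data.Int.Int32])
                                    (vector [3, 5, 12]) (vector [1, 1, 1])
     -- splits == [0,1,1,5], values == [2,8,9,10,11]
     -- (the tsplits type is left to the caller's annotation)
-}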
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tsplits) => [Tensor v'1 tsplits] -- ^ __rt_nested_splits__ -> Tensor v'2 t -- ^ __rt_dense_values__ -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__sparse_indices__, __sparse_values__, __sparse_dense_shape__) -- -- * __sparse_indices__ -- -- * __sparse_values__ -- -- * __sparse_dense_shape__ raggedTensorToSparse = raggedTensorToSparse' id raggedTensorToSparse' :: forall v'1 v'2 t tsplits . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tsplits) => OpParams -> [Tensor v'1 tsplits] -- ^ __rt_nested_splits__ -> Tensor v'2 t -- ^ __rt_dense_values__ -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__sparse_indices__, __sparse_values__, __sparse_dense_shape__) -- -- * __sparse_indices__ -- -- * __sparse_values__ -- -- * __sparse_dense_shape__ raggedTensorToSparse' op'options rt_nested_splits rt_dense_values | eqLengthGuard [("RAGGED_RANK", [("rt_nested_splits", length rt_nested_splits)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs rt_nested_splits, buildInputs rt_dense_values] return (opDef "RaggedTensorToSparse" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tsplits" .~ tensorType (undefined :: tsplits) & opAttr "RAGGED_RANK" .~ rAGGED_RANK & op'options & opInputs .~ op'inputs) where rAGGED_RANK = fromIntegral (length rt_nested_splits) :: Int64 {- input_arg { name: "rt_nested_splits" type_attr: "Tsplits" number_attr: "RAGGED_RANK" } input_arg { name: "rt_dense_values" type_attr: "T" } output_arg { name: "sparse_indices" type: DT_INT64 } output_arg { name: "sparse_values" type_attr: "T" } output_arg { name: "sparse_dense_shape" type: DT_INT64 } attr { name: "RAGGED_RANK" type: "int" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } attr { name: "Tsplits" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | raggedTensorToVariant :: forall v'1 v'2 tvalues tsplits . (TensorType tvalues, OneOf '[Data.Int.Int32, Data.Int.Int64] tsplits) => Bool -- ^ __batched_input__ -> [Tensor v'1 tsplits] -- ^ __rt_nested_splits__ -> Tensor v'2 tvalues -- ^ __rt_dense_values__ -> Tensor Build Variant -- ^ __encoded_ragged__ raggedTensorToVariant = raggedTensorToVariant' id raggedTensorToVariant' :: forall v'1 v'2 tvalues tsplits . 
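{- raggedTensorToSparse (above) converts (nested splits, dense values) into the
   COO triple used by sparse tensors. Continuing the raggedRange example,
   splits [0,1,1,5] over values [2,8,9,10,11] yield indices
   [[0,0],[2,0],[2,1],[2,2],[2,3]] and dense shape [3,4]:

     (ix, vals, shp) = raggedTensorToSparse [splits] values
-}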
(TensorType tvalues, OneOf '[Data.Int.Int32, Data.Int.Int64] tsplits) => OpParams -> Bool -- ^ __batched_input__ -> [Tensor v'1 tsplits] -- ^ __rt_nested_splits__ -> Tensor v'2 tvalues -- ^ __rt_dense_values__ -> Tensor Build Variant -- ^ __encoded_ragged__ raggedTensorToVariant' op'options batched_input rt_nested_splits rt_dense_values | eqLengthGuard [("RAGGED_RANK", [("rt_nested_splits", length rt_nested_splits)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs rt_nested_splits, buildInputs rt_dense_values] return (opDef "RaggedTensorToVariant" & opAttr "Tvalues" .~ tensorType (undefined :: tvalues) & opAttr "Tsplits" .~ tensorType (undefined :: tsplits) & opAttr "batched_input" .~ batched_input & opAttr "RAGGED_RANK" .~ rAGGED_RANK & op'options & opInputs .~ op'inputs) where rAGGED_RANK = fromIntegral (length rt_nested_splits) :: Int64 {- input_arg { name: "rt_nested_splits" type_attr: "Tsplits" number_attr: "RAGGED_RANK" } input_arg { name: "rt_dense_values" type_attr: "Tvalues" } output_arg { name: "encoded_ragged" type: DT_VARIANT } attr { name: "RAGGED_RANK" type: "int" has_minimum: true minimum: 1 } attr { name: "Tvalues" type: "type" } attr { name: "Tsplits" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "batched_input" type: "bool" } -} -- | randomCrop :: forall v'1 v'2 t m' . (MonadBuild m', OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __image__ -> Tensor v'2 Data.Int.Int64 -- ^ __size__ -> m' (Tensor Value t) -- ^ __output__ randomCrop = randomCrop' id randomCrop' :: forall v'1 v'2 t m' . (MonadBuild m', OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __image__ -> Tensor v'2 Data.Int.Int64 -- ^ __size__ -> m' (Tensor Value t) -- ^ __output__ randomCrop' op'options image size | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs image, buildInputs size] buildOp [] (opDef "RandomCrop" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "image" type_attr: "T" } input_arg { name: "size" type: DT_INT64 } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } -} -- | randomGamma :: forall v'1 v'2 s t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] s, OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 s -- ^ __shape__ -> Tensor v'2 t -- ^ __alpha__ -> m' (Tensor Value t) -- ^ __output__ randomGamma = randomGamma' id randomGamma' :: forall v'1 v'2 s t m' . 
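{- randomCrop (above) is a stateful (seeded) op that expects a 3-D
   [height, width, channels] image and a 2-vector [crop_height, crop_width].
   Sketch, with `image` assumed to be such a tensor:

     cropped <- randomCrop image (vector [224, 224 :: Data.Int.Int64])
-}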
(MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] s, OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 s -- ^ __shape__ -> Tensor v'2 t -- ^ __alpha__ -> m' (Tensor Value t) -- ^ __output__ randomGamma' op'options shape alpha | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape, buildInputs alpha] buildOp [] (opDef "RandomGamma" & opAttr "S" .~ tensorType (undefined :: s) & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "shape" type_attr: "S" } input_arg { name: "alpha" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } attr { name: "S" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | randomGammaGrad :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __alpha__ -> Tensor v'2 t -- ^ __sample__ -> Tensor Build t -- ^ __output__ randomGammaGrad = randomGammaGrad' id randomGammaGrad' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __alpha__ -> Tensor v'2 t -- ^ __sample__ -> Tensor Build t -- ^ __output__ randomGammaGrad' op'options alpha sample | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs alpha, buildInputs sample] return (opDef "RandomGammaGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "alpha" type_attr: "T" } input_arg { name: "sample" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | randomPoisson :: forall v'1 v'2 s dtype m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] s, OneOf '[Data.Word.Word16, Double, Float] dtype) => Tensor v'1 s -- ^ __shape__ -> Tensor v'2 dtype -- ^ __rate__ -> m' (Tensor Value dtype) -- ^ __output__ randomPoisson = randomPoisson' id randomPoisson' :: forall v'1 v'2 s dtype m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] s, OneOf '[Data.Word.Word16, Double, Float] dtype) => OpParams -> Tensor v'1 s -- ^ __shape__ -> Tensor v'2 dtype -- ^ __rate__ -> m' (Tensor Value dtype) -- ^ __output__ randomPoisson' op'options shape rate | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape, buildInputs rate] buildOp [] (opDef "RandomPoisson" & opAttr "S" .~ tensorType (undefined :: s) & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "shape" type_attr: "S" } input_arg { name: "rate" type_attr: "dtype" } output_arg { name: "output" type_attr: "dtype" } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } attr { name: "S" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "dtype" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | randomPoissonV2 :: forall v'1 v'2 s r dtype m' . 
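{- randomGamma (above) draws shape-many samples for every alpha: the output
   shape is the requested shape followed by alpha's shape. Sketch, drawing 10
   samples each from Gamma(2.0) and Gamma(5.0):

     samples <- randomGamma (vector [10 :: Data.Int.Int32])
                            (vector [2.0, 5.0 :: Float])
     -- samples :: Tensor Value Float with shape [10, 2]
-}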
(MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] s, OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] r, OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] dtype) => Tensor v'1 s -- ^ __shape__ -> Tensor v'2 r -- ^ __rate__ -> m' (Tensor Value dtype) -- ^ __output__ randomPoissonV2 = randomPoissonV2' id randomPoissonV2' :: forall v'1 v'2 s r dtype m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] s, OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] r, OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] dtype) => OpParams -> Tensor v'1 s -- ^ __shape__ -> Tensor v'2 r -- ^ __rate__ -> m' (Tensor Value dtype) -- ^ __output__ randomPoissonV2' op'options shape rate | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape, buildInputs rate] buildOp [] (opDef "RandomPoissonV2" & opAttr "S" .~ tensorType (undefined :: s) & opAttr "R" .~ tensorType (undefined :: r) & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "shape" type_attr: "S" } input_arg { name: "rate" type_attr: "R" } output_arg { name: "output" type_attr: "dtype" } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } attr { name: "S" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "R" type: "type" default_value { type: DT_DOUBLE } allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } attr { name: "dtype" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } -} -- | randomShuffle :: forall v'1 t m' . (MonadBuild m', TensorType t) => Tensor v'1 t -- ^ __value__ -> m' (Tensor Value t) -- ^ __output__ randomShuffle = randomShuffle' id randomShuffle' :: forall v'1 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 t -- ^ __value__ -> m' (Tensor Value t) -- ^ __output__ randomShuffle' op'options value | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value] buildOp [] (opDef "RandomShuffle" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "value" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } attr { name: "T" type: "type" } -} -- | randomShuffleQueue :: forall m' . (MonadBuild m') => [DataType] -- ^ __component_types__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__ randomShuffleQueue = randomShuffleQueue' id randomShuffleQueue' :: forall m' . 
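{- randomShuffle (above) permutes a tensor along its first dimension only; the
   contents of each row are untouched. Sketch, assuming `constant` and `Shape`
   from TensorFlow.Ops and TensorFlow.Types:

     shuffled <- randomShuffle (constant (Shape [3, 2]) [1..6 :: Float])
-}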
(MonadBuild m') => OpParams -> [DataType] -- ^ __component_types__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__ randomShuffleQueue' op'options component_types | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "RandomShuffleQueue" & opAttr "component_types" .~ component_types & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" type: DT_STRING is_ref: true } attr { name: "component_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "shapes" type: "list(shape)" default_value { list { } } has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } } attr { name: "min_after_dequeue" type: "int" default_value { i: 0 } } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | randomShuffleQueueV2 :: forall m' . (MonadBuild m') => [DataType] -- ^ __component_types__ -> m' (Tensor Value ResourceHandle) -- ^ __handle__ randomShuffleQueueV2 = randomShuffleQueueV2' id randomShuffleQueueV2' :: forall m' . (MonadBuild m') => OpParams -> [DataType] -- ^ __component_types__ -> m' (Tensor Value ResourceHandle) -- ^ __handle__ randomShuffleQueueV2' op'options component_types | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "RandomShuffleQueueV2" & opAttr "component_types" .~ component_types & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" type: DT_RESOURCE } attr { name: "component_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "shapes" type: "list(shape)" default_value { list { } } has_minimum: true } attr { name: "capacity" type: "int" default_value { i: -1 } } attr { name: "min_after_dequeue" type: "int" default_value { i: 0 } } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | randomStandardNormal :: forall v'1 dtype t m' . (MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Tensor v'1 t -- ^ __shape__ -> m' (Tensor Value dtype) -- ^ __output__ randomStandardNormal = randomStandardNormal' id randomStandardNormal' :: forall v'1 dtype t m' . (MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Tensor v'1 t -- ^ __shape__ -> m' (Tensor Value dtype) -- ^ __output__ randomStandardNormal' op'options shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape] buildOp [] (opDef "RandomStandardNormal" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "shape" type_attr: "T" } output_arg { name: "output" type_attr: "dtype" } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } attr { name: "dtype" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | randomUniform :: forall v'1 dtype t m' . 
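{- randomStandardNormal (above) fills the requested shape with N(0,1) draws;
   the element type is chosen by annotation. Sketch inside a Session
   (`runSession`/`run` assumed from TensorFlow.Session):

     xs <- randomStandardNormal (vector [2, 3 :: Data.Int.Int64])
             :: Session (Tensor Value Float)
-}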
(MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Tensor v'1 t -- ^ __shape__ -> m' (Tensor Value dtype) -- ^ __output__ randomUniform = randomUniform' id randomUniform' :: forall v'1 dtype t m' . (MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Tensor v'1 t -- ^ __shape__ -> m' (Tensor Value dtype) -- ^ __output__ randomUniform' op'options shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape] buildOp [] (opDef "RandomUniform" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "shape" type_attr: "T" } output_arg { name: "output" type_attr: "dtype" } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } attr { name: "dtype" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | randomUniformInt :: forall v'1 v'2 v'3 tout t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] tout, OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Tensor v'1 t -- ^ __shape__ -> Tensor v'2 tout -- ^ __minval__ -> Tensor v'3 tout -- ^ __maxval__ -> m' (Tensor Value tout) -- ^ __output__ randomUniformInt = randomUniformInt' id randomUniformInt' :: forall v'1 v'2 v'3 tout t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] tout, OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Tensor v'1 t -- ^ __shape__ -> Tensor v'2 tout -- ^ __minval__ -> Tensor v'3 tout -- ^ __maxval__ -> m' (Tensor Value tout) -- ^ __output__ randomUniformInt' op'options shape minval maxval | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape, buildInputs minval, buildInputs maxval] buildOp [] (opDef "RandomUniformInt" & opAttr "Tout" .~ tensorType (undefined :: tout) & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "shape" type_attr: "T" } input_arg { name: "minval" type_attr: "Tout" } input_arg { name: "maxval" type_attr: "Tout" } output_arg { name: "output" type_attr: "Tout" } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } attr { name: "Tout" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | range :: forall v'1 v'2 v'3 tidx . (OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] tidx) => Tensor v'1 tidx -- ^ __start__ -> Tensor v'2 tidx -- ^ __limit__ -> Tensor v'3 tidx -- ^ __delta__ -> Tensor Build tidx -- ^ __output__ range = range' id range' :: forall v'1 v'2 v'3 tidx . 
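{- randomUniform (above) samples uniformly from [0,1); randomUniformInt samples
   from the half-open integer interval [minval, maxval). Sketch of five dice
   rolls:

     rolls <- randomUniformInt (vector [5 :: Data.Int.Int32])
                               (scalar (1 :: Data.Int.Int32)) (scalar 7)
-}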
(OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] tidx) => OpParams -> Tensor v'1 tidx -- ^ __start__ -> Tensor v'2 tidx -- ^ __limit__ -> Tensor v'3 tidx -- ^ __delta__ -> Tensor Build tidx -- ^ __output__ range' op'options start limit delta | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs start, buildInputs limit, buildInputs delta] return (opDef "Range" & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "start" type_attr: "Tidx" } input_arg { name: "limit" type_attr: "Tidx" } input_arg { name: "delta" type_attr: "Tidx" } output_arg { name: "output" type_attr: "Tidx" } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } -} -- | rangeDataset :: forall v'1 v'2 v'3 m' . (MonadBuild m') => [DataType] -- ^ __output_types__ -> Tensor v'1 Data.Int.Int64 -- ^ __start__ -> Tensor v'2 Data.Int.Int64 -- ^ __stop__ -> Tensor v'3 Data.Int.Int64 -- ^ __step__ -> m' (Tensor Value Variant) -- ^ __handle__ rangeDataset = rangeDataset' id rangeDataset' :: forall v'1 v'2 v'3 m' . (MonadBuild m') => OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Data.Int.Int64 -- ^ __start__ -> Tensor v'2 Data.Int.Int64 -- ^ __stop__ -> Tensor v'3 Data.Int.Int64 -- ^ __step__ -> m' (Tensor Value Variant) -- ^ __handle__ rangeDataset' op'options output_types start stop step | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs start, buildInputs stop, buildInputs step] buildOp [] (opDef "RangeDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "start" type: DT_INT64 } input_arg { name: "stop" type: DT_INT64 } input_arg { name: "step" type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | rank :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __input__ -> Tensor Build Data.Int.Int32 -- ^ __output__ rank = rank' id rank' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build Data.Int.Int32 -- ^ __output__ rank' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Rank" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type: DT_INT32 } attr { name: "T" type: "type" } -} -- | readFile :: Tensor v'1 Data.ByteString.ByteString -- ^ __filename__ -> Tensor Build Data.ByteString.ByteString -- ^ __contents__ readFile = readFile' id readFile' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __filename__ -> Tensor Build Data.ByteString.ByteString -- ^ __contents__ readFile' op'options filename | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs filename] return (opDef "ReadFile" & op'options & opInputs .~ op'inputs) {- input_arg { name: "filename" type: DT_STRING } output_arg { name: "contents" type: DT_STRING } -} -- | readVariableOp :: forall v'1 dtype m' . 
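{- range and rank (above) are pure Build-graph ops. Sketch, assuming `scalar`
   and `constant` from TensorFlow.Ops:

     let xs = range (scalar (3 :: Data.Int.Int32)) (scalar 18) (scalar 3)
         -- evaluates to [3,6,9,12,15]; the limit is exclusive
         r  = rank (constant (Shape [2, 2]) [1, 2, 3, 4 :: Float])
         -- evaluates to 2, regardless of element type
-}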
(MonadBuild m', TensorType dtype) => Tensor v'1 ResourceHandle -- ^ __resource__ -> m' (Tensor Value dtype) -- ^ __value__ readVariableOp = readVariableOp' id readVariableOp' :: forall v'1 dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> m' (Tensor Value dtype) -- ^ __value__ readVariableOp' op'options resource | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource] buildOp [] (opDef "ReadVariableOp" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } output_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" } -} -- | readerNumRecordsProduced :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__ -> m' (Tensor Value Data.Int.Int64) -- ^ __records_produced__ readerNumRecordsProduced = readerNumRecordsProduced' id readerNumRecordsProduced' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__ -> m' (Tensor Value Data.Int.Int64) -- ^ __records_produced__ readerNumRecordsProduced' op'options reader_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle] buildOp [] (opDef "ReaderNumRecordsProduced" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" type: DT_STRING is_ref: true } output_arg { name: "records_produced" type: DT_INT64 } -} -- | readerNumRecordsProducedV2 :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __reader_handle__ -> m' (Tensor Value Data.Int.Int64) -- ^ __records_produced__ readerNumRecordsProducedV2 = readerNumRecordsProducedV2' id readerNumRecordsProducedV2' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __reader_handle__ -> m' (Tensor Value Data.Int.Int64) -- ^ __records_produced__ readerNumRecordsProducedV2' op'options reader_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle] buildOp [] (opDef "ReaderNumRecordsProducedV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" type: DT_RESOURCE } output_arg { name: "records_produced" type: DT_INT64 } -} -- | readerNumWorkUnitsCompleted :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__ -> m' (Tensor Value Data.Int.Int64) -- ^ __units_completed__ readerNumWorkUnitsCompleted = readerNumWorkUnitsCompleted' id readerNumWorkUnitsCompleted' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__ -> m' (Tensor Value Data.Int.Int64) -- ^ __units_completed__ readerNumWorkUnitsCompleted' op'options reader_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle] buildOp [] (opDef "ReaderNumWorkUnitsCompleted" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" type: DT_STRING is_ref: true } output_arg { name: "units_completed" type: DT_INT64 } -} -- | readerNumWorkUnitsCompletedV2 :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __reader_handle__ -> m' (Tensor Value Data.Int.Int64) -- ^ __units_completed__ readerNumWorkUnitsCompletedV2 = readerNumWorkUnitsCompletedV2' id readerNumWorkUnitsCompletedV2' :: forall v'1 m' . 
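{- readVariableOp (above) dereferences a resource-variable handle; dtype must
   match the type the variable was created with and is fixed here by
   annotation. Sketch, with `h` assumed to be a handle from a variable-creation
   op such as varHandleOp elsewhere in this module:

     v <- readVariableOp h :: Session (Tensor Value Float)
-}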
(MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __reader_handle__ -> m' (Tensor Value Data.Int.Int64) -- ^ __units_completed__ readerNumWorkUnitsCompletedV2' op'options reader_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle] buildOp [] (opDef "ReaderNumWorkUnitsCompletedV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" type: DT_RESOURCE } output_arg { name: "units_completed" type: DT_INT64 } -} -- | readerRead :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__ -> Tensor Ref Data.ByteString.ByteString -- ^ __queue_handle__ -> m' ((Tensor Value Data.ByteString.ByteString, Tensor Value Data.ByteString.ByteString)) -- ^ (__key__, __value__) -- -- * __key__ -- -- * __value__ readerRead = readerRead' id readerRead' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__ -> Tensor Ref Data.ByteString.ByteString -- ^ __queue_handle__ -> m' ((Tensor Value Data.ByteString.ByteString, Tensor Value Data.ByteString.ByteString)) -- ^ (__key__, __value__) -- -- * __key__ -- -- * __value__ readerRead' op'options reader_handle queue_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle, buildInputs queue_handle] buildOp [] (opDef "ReaderRead" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" type: DT_STRING is_ref: true } input_arg { name: "queue_handle" type: DT_STRING is_ref: true } output_arg { name: "key" type: DT_STRING } output_arg { name: "value" type: DT_STRING } -} -- | readerReadUpTo :: forall v'3 m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__ -> Tensor Ref Data.ByteString.ByteString -- ^ __queue_handle__ -> Tensor v'3 Data.Int.Int64 -- ^ __num_records__ -> m' ((Tensor Value Data.ByteString.ByteString, Tensor Value Data.ByteString.ByteString)) -- ^ (__keys__, __values__) -- -- * __keys__ -- -- * __values__ readerReadUpTo = readerReadUpTo' id readerReadUpTo' :: forall v'3 m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__ -> Tensor Ref Data.ByteString.ByteString -- ^ __queue_handle__ -> Tensor v'3 Data.Int.Int64 -- ^ __num_records__ -> m' ((Tensor Value Data.ByteString.ByteString, Tensor Value Data.ByteString.ByteString)) -- ^ (__keys__, __values__) -- -- * __keys__ -- -- * __values__ readerReadUpTo' op'options reader_handle queue_handle num_records | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle, buildInputs queue_handle, buildInputs num_records] buildOp [] (opDef "ReaderReadUpTo" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" type: DT_STRING is_ref: true } input_arg { name: "queue_handle" type: DT_STRING is_ref: true } input_arg { name: "num_records" type: DT_INT64 } output_arg { name: "keys" type: DT_STRING } output_arg { name: "values" type: DT_STRING } -} -- | readerReadUpToV2 :: forall v'1 v'2 v'3 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __reader_handle__ -> Tensor v'2 ResourceHandle -- ^ __queue_handle__ -> Tensor v'3 Data.Int.Int64 -- ^ __num_records__ -> m' ((Tensor Value Data.ByteString.ByteString, Tensor Value Data.ByteString.ByteString)) -- ^ (__keys__, __values__) -- -- * __keys__ -- -- * __values__ readerReadUpToV2 = readerReadUpToV2' id readerReadUpToV2' :: forall v'1 v'2 v'3 m' . 
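{- readerRead (above) pulls one (key, value) record from a reader, drawing work
   items from the filename queue as needed. Sketch, with `reader` and
   `filenameQueue` assumed to be Ref handles built from ops in this module
   (e.g. a TextLineReader and a string queue):

     (key, value) <- readerRead reader filenameQueue
-}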
(MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __reader_handle__ -> Tensor v'2 ResourceHandle -- ^ __queue_handle__ -> Tensor v'3 Data.Int.Int64 -- ^ __num_records__ -> m' ((Tensor Value Data.ByteString.ByteString, Tensor Value Data.ByteString.ByteString)) -- ^ (__keys__, __values__) -- -- * __keys__ -- -- * __values__ readerReadUpToV2' op'options reader_handle queue_handle num_records | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle, buildInputs queue_handle, buildInputs num_records] buildOp [] (opDef "ReaderReadUpToV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" type: DT_RESOURCE } input_arg { name: "queue_handle" type: DT_RESOURCE } input_arg { name: "num_records" type: DT_INT64 } output_arg { name: "keys" type: DT_STRING } output_arg { name: "values" type: DT_STRING } -} -- | readerReadV2 :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __reader_handle__ -> Tensor v'2 ResourceHandle -- ^ __queue_handle__ -> m' ((Tensor Value Data.ByteString.ByteString, Tensor Value Data.ByteString.ByteString)) -- ^ (__key__, __value__) -- -- * __key__ -- -- * __value__ readerReadV2 = readerReadV2' id readerReadV2' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __reader_handle__ -> Tensor v'2 ResourceHandle -- ^ __queue_handle__ -> m' ((Tensor Value Data.ByteString.ByteString, Tensor Value Data.ByteString.ByteString)) -- ^ (__key__, __value__) -- -- * __key__ -- -- * __value__ readerReadV2' op'options reader_handle queue_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle, buildInputs queue_handle] buildOp [] (opDef "ReaderReadV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" type: DT_RESOURCE } input_arg { name: "queue_handle" type: DT_RESOURCE } output_arg { name: "key" type: DT_STRING } output_arg { name: "value" type: DT_STRING } -} -- | readerReset :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__ -> m' (ControlNode) readerReset = readerReset' id readerReset' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__ -> m' (ControlNode) readerReset' op'options reader_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle] buildOp [] (opDef "ReaderReset" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" type: DT_STRING is_ref: true } -} -- | readerResetV2 :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __reader_handle__ -> m' (ControlNode) readerResetV2 = readerResetV2' id readerResetV2' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __reader_handle__ -> m' (ControlNode) readerResetV2' op'options reader_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle] buildOp [] (opDef "ReaderResetV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" type: DT_RESOURCE } -} -- | readerRestoreState :: forall v'2 m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __state__ -> m' (ControlNode) readerRestoreState = readerRestoreState' id readerRestoreState' :: forall v'2 m' . 
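{- 'readerReadUpToV2' is the resource-handle variant of the batched read:
   it returns up to num_records (keys, values) pairs, and fewer once the
   input is exhausted; 'readerResetV2' returns the reader to its initial
   state. A placeholder sketch:

     (keys, vals) <- Ops.readerReadUpToV2 reader queue (TF.scalar (64 :: Int64))
     batch <- TF.run (keys, vals)
-}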
(MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __state__ -> m' (ControlNode) readerRestoreState' op'options reader_handle state | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle, buildInputs state] buildOp [] (opDef "ReaderRestoreState" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" type: DT_STRING is_ref: true } input_arg { name: "state" type: DT_STRING } -} -- | readerRestoreStateV2 :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __reader_handle__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __state__ -> m' (ControlNode) readerRestoreStateV2 = readerRestoreStateV2' id readerRestoreStateV2' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __reader_handle__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __state__ -> m' (ControlNode) readerRestoreStateV2' op'options reader_handle state | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle, buildInputs state] buildOp [] (opDef "ReaderRestoreStateV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" type: DT_RESOURCE } input_arg { name: "state" type: DT_STRING } -} -- | readerSerializeState :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __state__ readerSerializeState = readerSerializeState' id readerSerializeState' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __reader_handle__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __state__ readerSerializeState' op'options reader_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle] buildOp [] (opDef "ReaderSerializeState" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" type: DT_STRING is_ref: true } output_arg { name: "state" type: DT_STRING } -} -- | readerSerializeStateV2 :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __reader_handle__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __state__ readerSerializeStateV2 = readerSerializeStateV2' id readerSerializeStateV2' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __reader_handle__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __state__ readerSerializeStateV2' op'options reader_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reader_handle] buildOp [] (opDef "ReaderSerializeStateV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "reader_handle" type: DT_RESOURCE } output_arg { name: "state" type: DT_STRING } -} -- | real :: forall v'1 t tout . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] t, OneOf '[Double, Float] tout) => Tensor v'1 t -- ^ __input__ -> Tensor Build tout -- ^ __output__ real = real' id real' :: forall v'1 t tout . 
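{- 'readerSerializeState' and 'readerRestoreState' form a checkpointing
   pair: the state is an opaque bytestring that only a reader of the same
   type can decode. A placeholder sketch ('V.!' from Data.Vector):

     state <- Ops.readerSerializeState reader >>= TF.run
     -- later, against a rebuilt reader of the same type:
     Ops.readerRestoreState reader (TF.scalar (state V.! 0)) >>= TF.run_
-}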
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float)] t, OneOf '[Double, Float] tout) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build tout -- ^ __output__ real' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Real" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tout" .~ tensorType (undefined :: tout) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "Tout" } attr { name: "T" type: "type" default_value { type: DT_COMPLEX64 } allowed_values { list { type: DT_COMPLEX64 type: DT_COMPLEX128 } } } attr { name: "Tout" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | realDiv :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ realDiv = realDiv' id realDiv' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ realDiv' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "RealDiv" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_UINT16 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | reciprocal :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ reciprocal = reciprocal' id reciprocal' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ reciprocal' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Reciprocal" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | reciprocalGrad :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __y__ -> Tensor v'2 t -- ^ __dy__ -> Tensor Build t -- ^ __z__ reciprocalGrad = reciprocalGrad' id reciprocalGrad' :: forall v'1 v'2 t . 
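{- Pure element-wise sketches ('real', 'realDiv', 'reciprocal'); Build
   tensors can be fetched directly. 'TF.vector' and 'TF.run' are assumed
   from the companion tensorflow package:

     TF.runSession $ TF.run $
       Ops.realDiv (TF.vector [1, 3 :: Float]) (TF.vector [4, 4])
     -- ~> fromList [0.25, 0.75]

     -- 'real' has a separate Tout parameter, fixed here by annotation:
     TF.runSession $ TF.run
       (Ops.real (TF.vector [3 :+ 4 :: Complex Float]) :: TF.Tensor TF.Build Float)
     -- ~> fromList [3.0]
-}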
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __y__ -> Tensor v'2 t -- ^ __dy__ -> Tensor Build t -- ^ __z__ reciprocalGrad' op'options y dy | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs y, buildInputs dy] return (opDef "ReciprocalGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "y" type_attr: "T" } input_arg { name: "dy" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | recordInput :: forall m' . (MonadBuild m') => m' (Tensor Value Data.ByteString.ByteString) -- ^ __records__ recordInput = recordInput' id recordInput' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __records__ recordInput' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "RecordInput" & op'options & opInputs .~ op'inputs) {- output_arg { name: "records" type: DT_STRING } attr { name: "file_pattern" type: "string" } attr { name: "file_random_seed" type: "int" default_value { i: 301 } } attr { name: "file_shuffle_shift_ratio" type: "float" default_value { f: 0.0 } } attr { name: "file_buffer_size" type: "int" default_value { i: 10000 } } attr { name: "file_parallelism" type: "int" default_value { i: 16 } } attr { name: "batch_size" type: "int" default_value { i: 32 } } attr { name: "compression_type" type: "string" default_value { s: "" } } -} -- | recvTPUEmbeddingActivations :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_outputs__ -> m' ([Tensor Value Float]) -- ^ __outputs__ recvTPUEmbeddingActivations = recvTPUEmbeddingActivations' id recvTPUEmbeddingActivations' :: forall m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_outputs__ -> m' ([Tensor Value Float]) -- ^ __outputs__ recvTPUEmbeddingActivations' op'options num_outputs | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [num_outputs] (opDef "RecvTPUEmbeddingActivations" & opAttr "num_outputs" .~ num_outputs & op'options & opInputs .~ op'inputs) {- output_arg { name: "outputs" type: DT_FLOAT number_attr: "num_outputs" } attr { name: "num_outputs" type: "int" has_minimum: true minimum: 1 } attr { name: "config" type: "string" } -} -- | reduceJoin :: Tensor v'1 Data.ByteString.ByteString -- ^ __inputs__ -> Tensor v'2 Data.Int.Int32 -- ^ __reduction_indices__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ reduceJoin = reduceJoin' id reduceJoin' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __inputs__ -> Tensor v'2 Data.Int.Int32 -- ^ __reduction_indices__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ reduceJoin' op'options inputs reduction_indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs, buildInputs reduction_indices] return (opDef "ReduceJoin" & op'options & opInputs .~ op'inputs) {- input_arg { name: "inputs" type: DT_STRING } input_arg { name: "reduction_indices" type: DT_INT32 } output_arg { name: "output" type: DT_STRING } attr { name: "keep_dims" type: "bool" default_value { b: false } } attr { name: "separator" type: "string" default_value { s: "" } } -} -- | refEnter :: forall t m' . 
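{- 'recordInput' takes no positional arguments: every attr, including the
   required file_pattern, is supplied through OpParams, so the primed
   variant is the one used in practice. OpParams is OpDef -> OpDef, so attr
   setters compose with (.). A sketch (OverloadedStrings provides the
   ByteString literal):

     records <- Ops.recordInput'
       ( (opAttr "file_pattern" .~ ("data/*.tfrecord" :: ByteString))
       . (opAttr "batch_size"   .~ (64 :: Int64)) )
-}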
(MonadBuild m', TensorType t) => Tensor Ref t -- ^ __data__ -> m' (Tensor Ref t) -- ^ __output__ refEnter = refEnter' id refEnter' :: forall t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -- ^ __data__ -> m' (Tensor Ref t) -- ^ __output__ refEnter' op'options data' | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data'] buildOp [] (opDef "RefEnter" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" is_ref: true } output_arg { name: "output" type_attr: "T" is_ref: true } attr { name: "T" type: "type" } attr { name: "frame_name" type: "string" } attr { name: "is_constant" type: "bool" default_value { b: false } } attr { name: "parallel_iterations" type: "int" default_value { i: 10 } } -} -- | refExit :: forall t m' . (MonadBuild m', TensorType t) => Tensor Ref t -- ^ __data__ -> m' (Tensor Ref t) -- ^ __output__ refExit = refExit' id refExit' :: forall t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -- ^ __data__ -> m' (Tensor Ref t) -- ^ __output__ refExit' op'options data' | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data'] buildOp [] (opDef "RefExit" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" is_ref: true } output_arg { name: "output" type_attr: "T" is_ref: true } attr { name: "T" type: "type" } -} -- | refIdentity :: forall t m' . (MonadBuild m', TensorType t) => Tensor Ref t -- ^ __input__ -> m' (Tensor Ref t) -- ^ __output__ refIdentity = refIdentity' id refIdentity' :: forall t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -- ^ __input__ -> m' (Tensor Ref t) -- ^ __output__ refIdentity' op'options input | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "RefIdentity" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" is_ref: true } output_arg { name: "output" type_attr: "T" is_ref: true } attr { name: "T" type: "type" } -} -- | refMerge :: forall t m' . (MonadBuild m', TensorType t) => [Tensor Ref t] -- ^ __inputs__ -> m' ((Tensor Ref t, Tensor Value Data.Int.Int32)) -- ^ (__output__, __value_index__) -- -- * __output__ -- -- * __value_index__ refMerge = refMerge' id refMerge' :: forall t m' . (MonadBuild m', TensorType t) => OpParams -> [Tensor Ref t] -- ^ __inputs__ -> m' ((Tensor Ref t, Tensor Value Data.Int.Int32)) -- ^ (__output__, __value_index__) -- -- * __output__ -- -- * __value_index__ refMerge' op'options inputs | eqLengthGuard [("N", [("inputs", length inputs)])] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs] buildOp [] (opDef "RefMerge" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length inputs) :: Int64 {- input_arg { name: "inputs" type_attr: "T" number_attr: "N" is_ref: true } output_arg { name: "output" type_attr: "T" is_ref: true } output_arg { name: "value_index" type: DT_INT32 } attr { name: "T" type: "type" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } -} -- | refNextIteration :: forall t m' . (MonadBuild m', TensorType t) => Tensor Ref t -- ^ __data__ -> m' (Tensor Ref t) -- ^ __output__ refNextIteration = refNextIteration' id refNextIteration' :: forall t m' . 
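{- The ref* ops mirror the control-flow primitives (Enter, Exit, Identity,
   Merge, NextIteration, Select, Switch) for reference-typed tensors; they
   are building blocks for while-loop frames rather than ops used in
   isolation. 'refEnter' also needs the required frame_name string attr,
   which, as with 'recordInput', is only reachable via OpParams:

     out <- Ops.refEnter' (opAttr "frame_name" .~ ("loop" :: ByteString)) dataRef
-}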
(MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -- ^ __data__ -> m' (Tensor Ref t) -- ^ __output__ refNextIteration' op'options data' | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data'] buildOp [] (opDef "RefNextIteration" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" is_ref: true } output_arg { name: "output" type_attr: "T" is_ref: true } attr { name: "T" type: "type" } -} -- | refSelect :: forall v'1 t m' . (MonadBuild m', TensorType t) => Tensor v'1 Data.Int.Int32 -- ^ __index__ -> [Tensor Ref t] -- ^ __inputs__ -> m' (Tensor Ref t) -- ^ __output__ refSelect = refSelect' id refSelect' :: forall v'1 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __index__ -> [Tensor Ref t] -- ^ __inputs__ -> m' (Tensor Ref t) -- ^ __output__ refSelect' op'options index inputs | eqLengthGuard [("N", [("inputs", length inputs)])] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs index, buildInputs inputs] buildOp [] (opDef "RefSelect" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length inputs) :: Int64 {- input_arg { name: "index" type: DT_INT32 } input_arg { name: "inputs" type_attr: "T" number_attr: "N" is_ref: true } output_arg { name: "output" type_attr: "T" is_ref: true } attr { name: "T" type: "type" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } -} -- | refSwitch :: forall v'2 t m' . (MonadBuild m', TensorType t) => Tensor Ref t -- ^ __data__ -> Tensor v'2 Bool -- ^ __pred__ -> m' ((Tensor Ref t, Tensor Ref t)) -- ^ (__output_false__, __output_true__) -- -- * __output_false__ -- -- * __output_true__ refSwitch = refSwitch' id refSwitch' :: forall v'2 t m' . 
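{- 'refSwitch' forwards its input to output_false or output_true depending
   on pred; only the taken branch is produced at runtime, so the two
   outputs must eventually rejoin through a merge op. Placeholder sketch:

     (onFalse, onTrue) <- Ops.refSwitch dataRef predTensor
-}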
(MonadBuild m', TensorType t) => OpParams -> Tensor Ref t -- ^ __data__ -> Tensor v'2 Bool -- ^ __pred__ -> m' ((Tensor Ref t, Tensor Ref t)) -- ^ (__output_false__, __output_true__) -- -- * __output_false__ -- -- * __output_true__ refSwitch' op'options data' pred | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs pred] buildOp [] (opDef "RefSwitch" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" is_ref: true } input_arg { name: "pred" type: DT_BOOL } output_arg { name: "output_false" type_attr: "T" is_ref: true } output_arg { name: "output_true" type_attr: "T" is_ref: true } attr { name: "T" type: "type" } -} -- | regexFullMatch :: Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __pattern__ -> Tensor Build Bool -- ^ __output__ regexFullMatch = regexFullMatch' id regexFullMatch' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __pattern__ -> Tensor Build Bool -- ^ __output__ regexFullMatch' op'options input pattern | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs pattern] return (opDef "RegexFullMatch" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_STRING } input_arg { name: "pattern" type: DT_STRING } output_arg { name: "output" type: DT_BOOL } -} -- | regexReplace :: Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __pattern__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __rewrite__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ regexReplace = regexReplace' id regexReplace' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __pattern__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __rewrite__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ regexReplace' op'options input pattern rewrite | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs pattern, buildInputs rewrite] return (opDef "RegexReplace" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_STRING } input_arg { name: "pattern" type: DT_STRING } input_arg { name: "rewrite" type: DT_STRING } output_arg { name: "output" type: DT_STRING } attr { name: "replace_global" type: "bool" default_value { b: true } } -} -- | relu :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __features__ -> Tensor Build t -- ^ __activations__ relu = relu' id relu' :: forall v'1 t . 
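{- String-op sketch: 'regexFullMatch' and 'regexReplace' use RE2 pattern
   syntax, and the replace_global attr (default true) switches
   'regexReplace' between replace-all and replace-first:

     let xs  = TF.vector ["cat", "dog" :: ByteString]
         pat = TF.scalar ("c.*" :: ByteString)
     TF.runSession $ TF.run (Ops.regexFullMatch xs pat)
     -- ~> fromList [True, False]
     TF.runSession $ TF.run (Ops.regexReplace xs pat (TF.scalar "X"))
     -- ~> fromList ["X", "dog"]
-}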
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __features__ -> Tensor Build t -- ^ __activations__ relu' op'options features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features] return (opDef "Relu" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "features" type_attr: "T" } output_arg { name: "activations" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 type: DT_QINT8 } } } -} -- | relu6 :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __features__ -> Tensor Build t -- ^ __activations__ relu6 = relu6' id relu6' :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __features__ -> Tensor Build t -- ^ __activations__ relu6' op'options features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features] return (opDef "Relu6" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "features" type_attr: "T" } output_arg { name: "activations" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | relu6Grad :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __gradients__ -> Tensor v'2 t -- ^ __features__ -> Tensor Build t -- ^ __backprops__ relu6Grad = relu6Grad' id relu6Grad' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __gradients__ -> Tensor v'2 t -- ^ __features__ -> Tensor Build t -- ^ __backprops__ relu6Grad' op'options gradients features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients, buildInputs features] return (opDef "Relu6Grad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "gradients" type_attr: "T" } input_arg { name: "features" type_attr: "T" } output_arg { name: "backprops" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | reluGrad :: forall v'1 v'2 t . 
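{- Activation sketch: 'relu' computes max(x, 0) element-wise, 'relu6'
   computes min(max(x, 0), 6), and the *Grad variants map (gradients,
   features) to the backpropagated gradients:

     TF.runSession $ TF.run $ Ops.relu (TF.vector [-2, 0, 3 :: Float])
     -- ~> fromList [0.0, 0.0, 3.0]
     TF.runSession $ TF.run $ Ops.relu6 (TF.vector [-1, 7 :: Float])
     -- ~> fromList [0.0, 6.0]
-}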
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __gradients__ -> Tensor v'2 t -- ^ __features__ -> Tensor Build t -- ^ __backprops__ reluGrad = reluGrad' id reluGrad' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __gradients__ -> Tensor v'2 t -- ^ __features__ -> Tensor Build t -- ^ __backprops__ reluGrad' op'options gradients features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients, buildInputs features] return (opDef "ReluGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "gradients" type_attr: "T" } input_arg { name: "features" type_attr: "T" } output_arg { name: "backprops" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | remoteFusedGraphExecute :: forall v'1 tinputs toutputs . (TensorTypes tinputs, TensorTypes toutputs) => TensorList (v'1) tinputs -- ^ __inputs__ -> TensorList (Build) toutputs -- ^ __outputs__ remoteFusedGraphExecute = remoteFusedGraphExecute' id remoteFusedGraphExecute' :: forall v'1 tinputs toutputs . (TensorTypes tinputs, TensorTypes toutputs) => OpParams -> TensorList (v'1) tinputs -- ^ __inputs__ -> TensorList (Build) toutputs -- ^ __outputs__ remoteFusedGraphExecute' op'options inputs | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs] return (opDef "RemoteFusedGraphExecute" & opAttr "Tinputs" .~ fromTensorTypes (Proxy :: Proxy tinputs) & opAttr "Toutputs" .~ fromTensorTypes (Proxy :: Proxy toutputs) & op'options & opInputs .~ op'inputs) {- input_arg { name: "inputs" type_list_attr: "Tinputs" } output_arg { name: "outputs" type_list_attr: "Toutputs" } attr { name: "Tinputs" type: "list(type)" has_minimum: true } attr { name: "Toutputs" type: "list(type)" has_minimum: true } attr { name: "serialized_remote_fused_graph_execute_info" type: "string" } -} -- | repeatDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __count__ -> Tensor Build Variant -- ^ __handle__ repeatDataset = repeatDataset' id repeatDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __count__ -> Tensor Build Variant -- ^ __handle__ repeatDataset' op'options output_types input_dataset count | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs count] return (opDef "RepeatDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "count" type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | requantizationRange :: forall v'1 v'2 v'3 tinput . 
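{- Dataset sketch: 'repeatDataset' yields the elements of input_dataset
   count times, with count = -1 meaning repeat indefinitely. Note that the
   wrapper only sets output_types; the equally required output_shapes attr
   must be passed via OpParams on the primed variant. Placeholder sketch:

     let repeated = Ops.repeatDataset [DT_STRING] inputDataset
                      (TF.scalar (-1 :: Int64))
-}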
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput) => Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 Float -- ^ __input_min__ -> Tensor v'3 Float -- ^ __input_max__ -> (Tensor Build Float, Tensor Build Float) -- ^ (__output_min__, __output_max__) -- -- * __output_min__ -- -- * __output_max__ requantizationRange = requantizationRange' id requantizationRange' :: forall v'1 v'2 v'3 tinput . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput) => OpParams -> Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 Float -- ^ __input_min__ -> Tensor v'3 Float -- ^ __input_max__ -> (Tensor Build Float, Tensor Build Float) -- ^ (__output_min__, __output_max__) -- -- * __output_min__ -- -- * __output_max__ requantizationRange' op'options input input_min input_max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs input_min, buildInputs input_max] return (opDef "RequantizationRange" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "input_min" type: DT_FLOAT } input_arg { name: "input_max" type: DT_FLOAT } output_arg { name: "output_min" type: DT_FLOAT } output_arg { name: "output_max" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } -} -- | requantizationRangePerChannel :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t) => Float -- ^ __clip_value_max__ -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 Float -- ^ __input_min__ -> Tensor v'3 Float -- ^ __input_max__ -> (Tensor Build Float, Tensor Build Float) -- ^ (__output_min__, __output_max__) -- -- * __output_min__ -- -- * __output_max__ requantizationRangePerChannel = requantizationRangePerChannel' id requantizationRangePerChannel' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t) => OpParams -> Float -- ^ __clip_value_max__ -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 Float -- ^ __input_min__ -> Tensor v'3 Float -- ^ __input_max__ -> (Tensor Build Float, Tensor Build Float) -- ^ (__output_min__, __output_max__) -- -- * __output_min__ -- -- * __output_max__ requantizationRangePerChannel' op'options clip_value_max input input_min input_max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs input_min, buildInputs input_max] return (opDef "RequantizationRangePerChannel" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "clip_value_max" .~ clip_value_max & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "input_min" type: DT_FLOAT } input_arg { name: "input_max" type: DT_FLOAT } output_arg { name: "output_min" type: DT_FLOAT } output_arg { name: "output_max" type: DT_FLOAT } attr { name: "T" type: "type" default_value { type: DT_QINT32 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "clip_value_max" type: "float" } -} -- | requantize :: forall v'1 v'2 v'3 v'4 v'5 tinput out_type . 
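{- Requantization sketch: 'requantizationRange' scans a quantized tensor
   for the min and max values actually present, and 'requantize' (defined
   next) uses that narrower range to convert, e.g., a qint32 accumulator to
   quint8 (out_type is chosen by annotation at the use site). Both are
   pure, returning tuples of Build tensors:

     let (outMin, outMax)   = Ops.requantizationRange acc accMin accMax
         (q8, q8Min, q8Max) = Ops.requantize acc accMin accMax outMin outMax
-}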
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 Float -- ^ __input_min__ -> Tensor v'3 Float -- ^ __input_max__ -> Tensor v'4 Float -- ^ __requested_output_min__ -> Tensor v'5 Float -- ^ __requested_output_max__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __output_min__, __output_max__) -- -- * __output__ -- -- * __output_min__ -- -- * __output_max__ requantize = requantize' id requantize' :: forall v'1 v'2 v'3 v'4 v'5 tinput out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] tinput, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 tinput -- ^ __input__ -> Tensor v'2 Float -- ^ __input_min__ -> Tensor v'3 Float -- ^ __input_max__ -> Tensor v'4 Float -- ^ __requested_output_min__ -> Tensor v'5 Float -- ^ __requested_output_max__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __output_min__, __output_max__) -- -- * __output__ -- -- * __output_min__ -- -- * __output_max__ requantize' op'options input input_min input_max requested_output_min requested_output_max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs input_min, buildInputs input_max, buildInputs requested_output_min, buildInputs requested_output_max] return (opDef "Requantize" & opAttr "Tinput" .~ tensorType (undefined :: tinput) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "Tinput" } input_arg { name: "input_min" type: DT_FLOAT } input_arg { name: "input_max" type: DT_FLOAT } input_arg { name: "requested_output_min" type: DT_FLOAT } input_arg { name: "requested_output_max" type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "output_min" type: DT_FLOAT } output_arg { name: "output_max" type: DT_FLOAT } attr { name: "Tinput" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "out_type" type: "type" allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } -} -- | requantizePerChannel :: forall v'1 v'2 v'3 v'4 v'5 t out_type . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 Float -- ^ __input_min__ -> Tensor v'3 Float -- ^ __input_max__ -> Tensor v'4 Float -- ^ __requested_output_min__ -> Tensor v'5 Float -- ^ __requested_output_max__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __output_min__, __output_max__) -- -- * __output__ -- -- * __output_min__ -- -- * __output_max__ requantizePerChannel = requantizePerChannel' id requantizePerChannel' :: forall v'1 v'2 v'3 v'4 v'5 t out_type . 
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] t, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Word.Word16, Data.Word.Word8] out_type) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 Float -- ^ __input_min__ -> Tensor v'3 Float -- ^ __input_max__ -> Tensor v'4 Float -- ^ __requested_output_min__ -> Tensor v'5 Float -- ^ __requested_output_max__ -> (Tensor Build out_type, Tensor Build Float, Tensor Build Float) -- ^ (__output__, __output_min__, __output_max__) -- -- * __output__ -- -- * __output_min__ -- -- * __output_max__ requantizePerChannel' op'options input input_min input_max requested_output_min requested_output_max | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs input_min, buildInputs input_max, buildInputs requested_output_min, buildInputs requested_output_max] return (opDef "RequantizePerChannel" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "input_min" type: DT_FLOAT } input_arg { name: "input_max" type: DT_FLOAT } input_arg { name: "requested_output_min" type: DT_FLOAT } input_arg { name: "requested_output_max" type: DT_FLOAT } output_arg { name: "output" type_attr: "out_type" } output_arg { name: "output_min" type: DT_FLOAT } output_arg { name: "output_max" type: DT_FLOAT } attr { name: "T" type: "type" default_value { type: DT_QINT32 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } attr { name: "out_type" type: "type" default_value { type: DT_QUINT8 } allowed_values { list { type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_QINT16 type: DT_QUINT16 } } } -} -- | reshape :: forall v'1 v'2 t tshape . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tshape) => Tensor v'1 t -- ^ __tensor__ -> Tensor v'2 tshape -- ^ __shape__ -> Tensor Build t -- ^ __output__ reshape = reshape' id reshape' :: forall v'1 v'2 t tshape . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tshape) => OpParams -> Tensor v'1 t -- ^ __tensor__ -> Tensor v'2 tshape -- ^ __shape__ -> Tensor Build t -- ^ __output__ reshape' op'options tensor shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor, buildInputs shape] return (opDef "Reshape" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tshape" .~ tensorType (undefined :: tshape) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" type_attr: "T" } input_arg { name: "shape" type_attr: "Tshape" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tshape" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | resizeArea :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __images__ -> Tensor v'2 Data.Int.Int32 -- ^ __size__ -> Tensor Build Float -- ^ __resized_images__ resizeArea = resizeArea' id resizeArea' :: forall v'1 v'2 t . 
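{- 'reshape' sketch: the new shape may contain at most one -1 entry, which
   is inferred from the total element count:

     TF.runSession $ TF.run $
       Ops.reshape (TF.constant (TF.Shape [2, 3]) [1..6 :: Float])
                   (TF.vector [3, -1 :: Int32])
     -- the same six elements, now viewed as a 3x2 tensor
-}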
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __images__ -> Tensor v'2 Data.Int.Int32 -- ^ __size__ -> Tensor Build Float -- ^ __resized_images__ resizeArea' op'options images size | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images, buildInputs size] return (opDef "ResizeArea" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "images" type_attr: "T" } input_arg { name: "size" type: DT_INT32 } output_arg { name: "resized_images" type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_UINT8 type: DT_INT16 type: DT_UINT16 type: DT_INT32 type: DT_INT64 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "align_corners" type: "bool" default_value { b: false } } -} -- | resizeBicubic :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __images__ -> Tensor v'2 Data.Int.Int32 -- ^ __size__ -> Tensor Build Float -- ^ __resized_images__ resizeBicubic = resizeBicubic' id resizeBicubic' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __images__ -> Tensor v'2 Data.Int.Int32 -- ^ __size__ -> Tensor Build Float -- ^ __resized_images__ resizeBicubic' op'options images size | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images, buildInputs size] return (opDef "ResizeBicubic" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "images" type_attr: "T" } input_arg { name: "size" type: DT_INT32 } output_arg { name: "resized_images" type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_UINT8 type: DT_INT16 type: DT_UINT16 type: DT_INT32 type: DT_INT64 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "align_corners" type: "bool" default_value { b: false } } attr { name: "half_pixel_centers" type: "bool" default_value { b: false } } -} -- | resizeBicubicGrad :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => Tensor v'1 Float -- ^ __grads__ -> Tensor v'2 t -- ^ __original_image__ -> Tensor Build t -- ^ __output__ resizeBicubicGrad = resizeBicubicGrad' id resizeBicubicGrad' :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 Float -- ^ __grads__ -> Tensor v'2 t -- ^ __original_image__ -> Tensor Build t -- ^ __output__ resizeBicubicGrad' op'options grads original_image | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs grads, buildInputs original_image] return (opDef "ResizeBicubicGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "grads" type: DT_FLOAT } input_arg { name: "original_image" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "align_corners" type: "bool" default_value { b: false } } attr { name: "half_pixel_centers" type: "bool" default_value { b: false } } -} -- | resizeBilinear :: forall v'1 v'2 t . 
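{- Image-resize sketch: the resize* ops take NHWC image batches and a
   [new_height, new_width] size vector; align_corners and
   half_pixel_centers are bool attrs reachable through OpParams on the
   primed variants:

     let img  = TF.constant (TF.Shape [1, 2, 2, 1]) [1, 2, 3, 4 :: Float]
         size = TF.vector [4, 4 :: Int32]
     TF.runSession $ TF.run $
       Ops.resizeBilinear' (opAttr "half_pixel_centers" .~ True) img size
-}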
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __images__ -> Tensor v'2 Data.Int.Int32 -- ^ __size__ -> Tensor Build Float -- ^ __resized_images__ resizeBilinear = resizeBilinear' id resizeBilinear' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __images__ -> Tensor v'2 Data.Int.Int32 -- ^ __size__ -> Tensor Build Float -- ^ __resized_images__ resizeBilinear' op'options images size | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images, buildInputs size] return (opDef "ResizeBilinear" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "images" type_attr: "T" } input_arg { name: "size" type: DT_INT32 } output_arg { name: "resized_images" type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_UINT8 type: DT_INT16 type: DT_UINT16 type: DT_INT32 type: DT_INT64 type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "align_corners" type: "bool" default_value { b: false } } attr { name: "half_pixel_centers" type: "bool" default_value { b: false } } -} -- | resizeBilinearGrad :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 Float -- ^ __grads__ -> Tensor v'2 t -- ^ __original_image__ -> Tensor Build t -- ^ __output__ resizeBilinearGrad = resizeBilinearGrad' id resizeBilinearGrad' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 Float -- ^ __grads__ -> Tensor v'2 t -- ^ __original_image__ -> Tensor Build t -- ^ __output__ resizeBilinearGrad' op'options grads original_image | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs grads, buildInputs original_image] return (opDef "ResizeBilinearGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "grads" type: DT_FLOAT } input_arg { name: "original_image" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_BFLOAT16 type: DT_HALF type: DT_DOUBLE } } } attr { name: "align_corners" type: "bool" default_value { b: false } } attr { name: "half_pixel_centers" type: "bool" default_value { b: false } } -} -- | resizeNearestNeighbor :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __images__ -> Tensor v'2 Data.Int.Int32 -- ^ __size__ -> Tensor Build t -- ^ __resized_images__ resizeNearestNeighbor = resizeNearestNeighbor' id resizeNearestNeighbor' :: forall v'1 v'2 t . 
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __images__ -> Tensor v'2 Data.Int.Int32 -- ^ __size__ -> Tensor Build t -- ^ __resized_images__ resizeNearestNeighbor' op'options images size | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images, buildInputs size] return (opDef "ResizeNearestNeighbor" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "images" type_attr: "T" } input_arg { name: "size" type: DT_INT32 } output_arg { name: "resized_images" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_UINT8 type: DT_INT16 type: DT_UINT16 type: DT_INT32 type: DT_INT64 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "align_corners" type: "bool" default_value { b: false } } attr { name: "half_pixel_centers" type: "bool" default_value { b: false } } -} -- | resizeNearestNeighborGrad :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __grads__ -> Tensor v'2 Data.Int.Int32 -- ^ __size__ -> Tensor Build t -- ^ __output__ resizeNearestNeighborGrad = resizeNearestNeighborGrad' id resizeNearestNeighborGrad' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __grads__ -> Tensor v'2 Data.Int.Int32 -- ^ __size__ -> Tensor Build t -- ^ __output__ resizeNearestNeighborGrad' op'options grads size | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs grads, buildInputs size] return (opDef "ResizeNearestNeighborGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "grads" type_attr: "T" } input_arg { name: "size" type: DT_INT32 } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_INT32 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "align_corners" type: "bool" default_value { b: false } } attr { name: "half_pixel_centers" type: "bool" default_value { b: false } } -} -- | resourceApplyAdaMax :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __m__ -> Tensor v'3 ResourceHandle -- ^ __v__ -> Tensor v'4 t -- ^ __beta1_power__ -> Tensor v'5 t -- ^ __lr__ -> Tensor v'6 t -- ^ __beta1__ -> Tensor v'7 t -- ^ __beta2__ -> Tensor v'8 t -- ^ __epsilon__ -> Tensor v'9 t -- ^ __grad__ -> m' (ControlNode) resourceApplyAdaMax = resourceApplyAdaMax' id resourceApplyAdaMax' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t m' . 
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __m__ -> Tensor v'3 ResourceHandle -- ^ __v__ -> Tensor v'4 t -- ^ __beta1_power__ -> Tensor v'5 t -- ^ __lr__ -> Tensor v'6 t -- ^ __beta1__ -> Tensor v'7 t -- ^ __beta2__ -> Tensor v'8 t -- ^ __epsilon__ -> Tensor v'9 t -- ^ __grad__ -> m' (ControlNode) resourceApplyAdaMax' op'options var m v beta1_power lr beta1 beta2 epsilon grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs m, buildInputs v, buildInputs beta1_power, buildInputs lr, buildInputs beta1, buildInputs beta2, buildInputs epsilon, buildInputs grad] buildOp [] (opDef "ResourceApplyAdaMax" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "m" type: DT_RESOURCE } input_arg { name: "v" type: DT_RESOURCE } input_arg { name: "beta1_power" type_attr: "T" } input_arg { name: "lr" type_attr: "T" } input_arg { name: "beta1" type_attr: "T" } input_arg { name: "beta2" type_attr: "T" } input_arg { name: "epsilon" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | resourceApplyAdadelta :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 ResourceHandle -- ^ __accum_update__ -> Tensor v'4 t -- ^ __lr__ -> Tensor v'5 t -- ^ __rho__ -> Tensor v'6 t -- ^ __epsilon__ -> Tensor v'7 t -- ^ __grad__ -> m' (ControlNode) resourceApplyAdadelta = resourceApplyAdadelta' id resourceApplyAdadelta' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t m' . 
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 ResourceHandle -- ^ __accum_update__ -> Tensor v'4 t -- ^ __lr__ -> Tensor v'5 t -- ^ __rho__ -> Tensor v'6 t -- ^ __epsilon__ -> Tensor v'7 t -- ^ __grad__ -> m' (ControlNode) resourceApplyAdadelta' op'options var accum accum_update lr rho epsilon grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs accum_update, buildInputs lr, buildInputs rho, buildInputs epsilon, buildInputs grad] buildOp [] (opDef "ResourceApplyAdadelta" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "accum" type: DT_RESOURCE } input_arg { name: "accum_update" type: DT_RESOURCE } input_arg { name: "lr" type_attr: "T" } input_arg { name: "rho" type_attr: "T" } input_arg { name: "epsilon" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | resourceApplyAdagrad :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __grad__ -> m' (ControlNode) resourceApplyAdagrad = resourceApplyAdagrad' id resourceApplyAdagrad' :: forall v'1 v'2 v'3 v'4 t m' . 
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __grad__ -> m' (ControlNode) resourceApplyAdagrad' op'options var accum lr grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs lr, buildInputs grad] buildOp [] (opDef "ResourceApplyAdagrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "accum" type: DT_RESOURCE } input_arg { name: "lr" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } attr { name: "update_slots" type: "bool" default_value { b: true } } -} -- | resourceApplyAdagradDA :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __gradient_accumulator__ -> Tensor v'3 ResourceHandle -- ^ __gradient_squared_accumulator__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 t -- ^ __lr__ -> Tensor v'6 t -- ^ __l1__ -> Tensor v'7 t -- ^ __l2__ -> Tensor v'8 Data.Int.Int64 -- ^ __global_step__ -> m' (ControlNode) resourceApplyAdagradDA = resourceApplyAdagradDA' id resourceApplyAdagradDA' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 t m' . 
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __gradient_accumulator__ -> Tensor v'3 ResourceHandle -- ^ __gradient_squared_accumulator__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 t -- ^ __lr__ -> Tensor v'6 t -- ^ __l1__ -> Tensor v'7 t -- ^ __l2__ -> Tensor v'8 Data.Int.Int64 -- ^ __global_step__ -> m' (ControlNode) resourceApplyAdagradDA' op'options var gradient_accumulator gradient_squared_accumulator grad lr l1 l2 global_step | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs gradient_accumulator, buildInputs gradient_squared_accumulator, buildInputs grad, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs global_step] buildOp [] (opDef "ResourceApplyAdagradDA" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "gradient_accumulator" type: DT_RESOURCE } input_arg { name: "gradient_squared_accumulator" type: DT_RESOURCE } input_arg { name: "grad" type_attr: "T" } input_arg { name: "lr" type_attr: "T" } input_arg { name: "l1" type_attr: "T" } input_arg { name: "l2" type_attr: "T" } input_arg { name: "global_step" type: DT_INT64 } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | resourceApplyAdam :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __m__ -> Tensor v'3 ResourceHandle -- ^ __v__ -> Tensor v'4 t -- ^ __beta1_power__ -> Tensor v'5 t -- ^ __beta2_power__ -> Tensor v'6 t -- ^ __lr__ -> Tensor v'7 t -- ^ __beta1__ -> Tensor v'8 t -- ^ __beta2__ -> Tensor v'9 t -- ^ __epsilon__ -> Tensor v'10 t -- ^ __grad__ -> m' (ControlNode) resourceApplyAdam = resourceApplyAdam' id resourceApplyAdam' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 t m' . 
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __m__ -> Tensor v'3 ResourceHandle -- ^ __v__ -> Tensor v'4 t -- ^ __beta1_power__ -> Tensor v'5 t -- ^ __beta2_power__ -> Tensor v'6 t -- ^ __lr__ -> Tensor v'7 t -- ^ __beta1__ -> Tensor v'8 t -- ^ __beta2__ -> Tensor v'9 t -- ^ __epsilon__ -> Tensor v'10 t -- ^ __grad__ -> m' (ControlNode) resourceApplyAdam' op'options var m v beta1_power beta2_power lr beta1 beta2 epsilon grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs m, buildInputs v, buildInputs beta1_power, buildInputs beta2_power, buildInputs lr, buildInputs beta1, buildInputs beta2, buildInputs epsilon, buildInputs grad] buildOp [] (opDef "ResourceApplyAdam" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "m" type: DT_RESOURCE } input_arg { name: "v" type: DT_RESOURCE } input_arg { name: "beta1_power" type_attr: "T" } input_arg { name: "beta2_power" type_attr: "T" } input_arg { name: "lr" type_attr: "T" } input_arg { name: "beta1" type_attr: "T" } input_arg { name: "beta2" type_attr: "T" } input_arg { name: "epsilon" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } attr { name: "use_nesterov" type: "bool" default_value { b: false } } -} -- | resourceApplyAdamWithAmsgrad :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 v'11 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __m__ -> Tensor v'3 ResourceHandle -- ^ __v__ -> Tensor v'4 ResourceHandle -- ^ __vhat__ -> Tensor v'5 t -- ^ __beta1_power__ -> Tensor v'6 t -- ^ __beta2_power__ -> Tensor v'7 t -- ^ __lr__ -> Tensor v'8 t -- ^ __beta1__ -> Tensor v'9 t -- ^ __beta2__ -> Tensor v'10 t -- ^ __epsilon__ -> Tensor v'11 t -- ^ __grad__ -> m' (ControlNode) resourceApplyAdamWithAmsgrad = resourceApplyAdamWithAmsgrad' id resourceApplyAdamWithAmsgrad' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 v'11 t m' . 
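{- Usage note (added, not generated): per the TensorFlow documentation the Adam
   kernel computes
     lr_t  = lr * sqrt(1 - beta2_power) / (1 - beta1_power)
     m_t   = beta1 * m + (1 - beta1) * grad
     v_t   = beta2 * v + (1 - beta2) * grad * grad
     var  -= lr_t * m_t / (sqrt(v_t) + epsilon)
   The "use_nesterov" attribute in the op definition above selects the
   Nesterov-style update; a sketch of enabling it through the primed variant:

     resourceApplyAdam' (opAttr "use_nesterov" .~ True)
                        var m v beta1Power beta2Power lr beta1 beta2
                        epsilon grad
-}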
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __m__ -> Tensor v'3 ResourceHandle -- ^ __v__ -> Tensor v'4 ResourceHandle -- ^ __vhat__ -> Tensor v'5 t -- ^ __beta1_power__ -> Tensor v'6 t -- ^ __beta2_power__ -> Tensor v'7 t -- ^ __lr__ -> Tensor v'8 t -- ^ __beta1__ -> Tensor v'9 t -- ^ __beta2__ -> Tensor v'10 t -- ^ __epsilon__ -> Tensor v'11 t -- ^ __grad__ -> m' (ControlNode) resourceApplyAdamWithAmsgrad' op'options var m v vhat beta1_power beta2_power lr beta1 beta2 epsilon grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs m, buildInputs v, buildInputs vhat, buildInputs beta1_power, buildInputs beta2_power, buildInputs lr, buildInputs beta1, buildInputs beta2, buildInputs epsilon, buildInputs grad] buildOp [] (opDef "ResourceApplyAdamWithAmsgrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "m" type: DT_RESOURCE } input_arg { name: "v" type: DT_RESOURCE } input_arg { name: "vhat" type: DT_RESOURCE } input_arg { name: "beta1_power" type_attr: "T" } input_arg { name: "beta2_power" type_attr: "T" } input_arg { name: "lr" type_attr: "T" } input_arg { name: "beta1" type_attr: "T" } input_arg { name: "beta2" type_attr: "T" } input_arg { name: "epsilon" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | resourceApplyAddSign :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __m__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __alpha__ -> Tensor v'5 t -- ^ __sign_decay__ -> Tensor v'6 t -- ^ __beta__ -> Tensor v'7 t -- ^ __grad__ -> m' (ControlNode) resourceApplyAddSign = resourceApplyAddSign' id resourceApplyAddSign' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t m' . 
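{- Usage note (added, not generated): the AMSGrad variant additionally maintains
   vhat, the running maximum of the second-moment estimate,
     vhat  = max(vhat, v_t)
     var  -= lr_t * m_t / (sqrt(vhat) + epsilon)
   Unlike ResourceApplyAdam, the op definition above exposes only "use_locking";
   there is no "use_nesterov" attribute.
-}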
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __m__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __alpha__ -> Tensor v'5 t -- ^ __sign_decay__ -> Tensor v'6 t -- ^ __beta__ -> Tensor v'7 t -- ^ __grad__ -> m' (ControlNode) resourceApplyAddSign' op'options var m lr alpha sign_decay beta grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs m, buildInputs lr, buildInputs alpha, buildInputs sign_decay, buildInputs beta, buildInputs grad] buildOp [] (opDef "ResourceApplyAddSign" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "m" type: DT_RESOURCE } input_arg { name: "lr" type_attr: "T" } input_arg { name: "alpha" type_attr: "T" } input_arg { name: "sign_decay" type_attr: "T" } input_arg { name: "beta" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | resourceApplyCenteredRMSProp :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __mg__ -> Tensor v'3 ResourceHandle -- ^ __ms__ -> Tensor v'4 ResourceHandle -- ^ __mom__ -> Tensor v'5 t -- ^ __lr__ -> Tensor v'6 t -- ^ __rho__ -> Tensor v'7 t -- ^ __momentum__ -> Tensor v'8 t -- ^ __epsilon__ -> Tensor v'9 t -- ^ __grad__ -> m' (ControlNode) resourceApplyCenteredRMSProp = resourceApplyCenteredRMSProp' id resourceApplyCenteredRMSProp' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t m' . 
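{- Usage note (added, not generated): per the TensorFlow documentation the AddSign
   update is
     m_t     = beta * m + (1 - beta) * grad
     update  = (alpha + sign_decay * sign(grad) * sign(m_t)) * grad
     var    -= lr * update
-}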
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __mg__ -> Tensor v'3 ResourceHandle -- ^ __ms__ -> Tensor v'4 ResourceHandle -- ^ __mom__ -> Tensor v'5 t -- ^ __lr__ -> Tensor v'6 t -- ^ __rho__ -> Tensor v'7 t -- ^ __momentum__ -> Tensor v'8 t -- ^ __epsilon__ -> Tensor v'9 t -- ^ __grad__ -> m' (ControlNode) resourceApplyCenteredRMSProp' op'options var mg ms mom lr rho momentum epsilon grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs mg, buildInputs ms, buildInputs mom, buildInputs lr, buildInputs rho, buildInputs momentum, buildInputs epsilon, buildInputs grad] buildOp [] (opDef "ResourceApplyCenteredRMSProp" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "mg" type: DT_RESOURCE } input_arg { name: "ms" type: DT_RESOURCE } input_arg { name: "mom" type: DT_RESOURCE } input_arg { name: "lr" type_attr: "T" } input_arg { name: "rho" type_attr: "T" } input_arg { name: "momentum" type_attr: "T" } input_arg { name: "epsilon" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | resourceApplyFtrl :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 ResourceHandle -- ^ __linear__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 t -- ^ __lr__ -> Tensor v'6 t -- ^ __l1__ -> Tensor v'7 t -- ^ __l2__ -> Tensor v'8 t -- ^ __lr_power__ -> m' (ControlNode) resourceApplyFtrl = resourceApplyFtrl' id resourceApplyFtrl' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 t m' . 
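{- Usage note (added, not generated): centered RMSProp additionally tracks mg, a
   decaying mean of the gradient, and normalizes by the estimated variance:
     mg   = rho * mg + (1 - rho) * grad
     ms   = rho * ms + (1 - rho) * grad * grad
     mom  = momentum * mom + lr * grad / sqrt(ms - mg * mg + epsilon)
     var -= mom
   This is typically more stable than plain ResourceApplyRMSProp below, at the
   cost of one extra accumulator.
-}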
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 ResourceHandle -- ^ __linear__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 t -- ^ __lr__ -> Tensor v'6 t -- ^ __l1__ -> Tensor v'7 t -- ^ __l2__ -> Tensor v'8 t -- ^ __lr_power__ -> m' (ControlNode) resourceApplyFtrl' op'options var accum linear grad lr l1 l2 lr_power | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs linear, buildInputs grad, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs lr_power] buildOp [] (opDef "ResourceApplyFtrl" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "accum" type: DT_RESOURCE } input_arg { name: "linear" type: DT_RESOURCE } input_arg { name: "grad" type_attr: "T" } input_arg { name: "lr" type_attr: "T" } input_arg { name: "l1" type_attr: "T" } input_arg { name: "l2" type_attr: "T" } input_arg { name: "lr_power" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | resourceApplyFtrlV2 :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 ResourceHandle -- ^ __linear__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 t -- ^ __lr__ -> Tensor v'6 t -- ^ __l1__ -> Tensor v'7 t -- ^ __l2__ -> Tensor v'8 t -- ^ __l2_shrinkage__ -> Tensor v'9 t -- ^ __lr_power__ -> m' (ControlNode) resourceApplyFtrlV2 = resourceApplyFtrlV2' id resourceApplyFtrlV2' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t m' . 
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 ResourceHandle -- ^ __linear__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 t -- ^ __lr__ -> Tensor v'6 t -- ^ __l1__ -> Tensor v'7 t -- ^ __l2__ -> Tensor v'8 t -- ^ __l2_shrinkage__ -> Tensor v'9 t -- ^ __lr_power__ -> m' (ControlNode) resourceApplyFtrlV2' op'options var accum linear grad lr l1 l2 l2_shrinkage lr_power | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs linear, buildInputs grad, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs l2_shrinkage, buildInputs lr_power] buildOp [] (opDef "ResourceApplyFtrlV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "accum" type: DT_RESOURCE } input_arg { name: "linear" type: DT_RESOURCE } input_arg { name: "grad" type_attr: "T" } input_arg { name: "lr" type_attr: "T" } input_arg { name: "l1" type_attr: "T" } input_arg { name: "l2" type_attr: "T" } input_arg { name: "l2_shrinkage" type_attr: "T" } input_arg { name: "lr_power" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | resourceApplyGradientDescent :: forall v'1 v'2 v'3 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 t -- ^ __alpha__ -> Tensor v'3 t -- ^ __delta__ -> m' (ControlNode) resourceApplyGradientDescent = resourceApplyGradientDescent' id resourceApplyGradientDescent' :: forall v'1 v'2 v'3 t m' . 
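{- Usage note (added, not generated): FtrlV2 differs from ResourceApplyFtrl above
   only in the extra l2_shrinkage input, which per the TensorFlow documentation
   acts on the gradient before the FTRL-proximal update:
     grad_with_shrinkage = grad + 2 * l2_shrinkage * var
   The plain l2 input remains a stabilization penalty inside the update itself.
-}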
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 t -- ^ __alpha__ -> Tensor v'3 t -- ^ __delta__ -> m' (ControlNode) resourceApplyGradientDescent' op'options var alpha delta | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs alpha, buildInputs delta] buildOp [] (opDef "ResourceApplyGradientDescent" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "alpha" type_attr: "T" } input_arg { name: "delta" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | resourceApplyKerasMomentum :: forall v'1 v'2 v'3 v'4 v'5 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 t -- ^ __momentum__ -> m' (ControlNode) resourceApplyKerasMomentum = resourceApplyKerasMomentum' id resourceApplyKerasMomentum' :: forall v'1 v'2 v'3 v'4 v'5 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 t -- ^ __momentum__ -> m' (ControlNode) resourceApplyKerasMomentum' op'options var accum lr grad momentum | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs lr, buildInputs grad, buildInputs momentum] buildOp [] (opDef "ResourceApplyKerasMomentum" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "accum" type: DT_RESOURCE } input_arg { name: "lr" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } input_arg { name: "momentum" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } attr { name: "use_nesterov" type: "bool" default_value { b: false } } -} -- | resourceApplyMomentum :: forall v'1 v'2 v'3 v'4 v'5 t m' . 
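{- Usage note (added, not generated): ResourceApplyGradientDescent performs
   var -= alpha * delta. The two momentum ops here differ in where lr enters,
   per the TensorFlow documentation:
     ResourceApplyKerasMomentum: accum = accum * momentum - lr * grad
                                 var  += accum
     ResourceApplyMomentum:      accum = accum * momentum + grad
                                 var  -= lr * accum
   Both expose "use_nesterov" through OpParams, e.g. a sketch (assuming the
   input tensors are built elsewhere):

     resourceApplyMomentum' (opAttr "use_nesterov" .~ True)
                            var accum lr grad momentum
-}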
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 t -- ^ __momentum__ -> m' (ControlNode) resourceApplyMomentum = resourceApplyMomentum' id resourceApplyMomentum' :: forall v'1 v'2 v'3 v'4 v'5 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 t -- ^ __momentum__ -> m' (ControlNode) resourceApplyMomentum' op'options var accum lr grad momentum | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs lr, buildInputs grad, buildInputs momentum] buildOp [] (opDef "ResourceApplyMomentum" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "accum" type: DT_RESOURCE } input_arg { name: "lr" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } input_arg { name: "momentum" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } attr { name: "use_nesterov" type: "bool" default_value { b: false } } -} -- | resourceApplyPowerSign :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __m__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __logbase__ -> Tensor v'5 t -- ^ __sign_decay__ -> Tensor v'6 t -- ^ __beta__ -> Tensor v'7 t -- ^ __grad__ -> m' (ControlNode) resourceApplyPowerSign = resourceApplyPowerSign' id resourceApplyPowerSign' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t m' . 
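{- Usage note (added, not generated): per the TensorFlow documentation the
   PowerSign update is
     m_t     = beta * m + (1 - beta) * grad
     update  = exp(logbase * sign_decay * sign(grad) * sign(m_t)) * grad
     var    -= lr * update
   ResourceApplyAddSign above is the additive counterpart of this
   multiplicative rule.
-}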
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __m__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __logbase__ -> Tensor v'5 t -- ^ __sign_decay__ -> Tensor v'6 t -- ^ __beta__ -> Tensor v'7 t -- ^ __grad__ -> m' (ControlNode) resourceApplyPowerSign' op'options var m lr logbase sign_decay beta grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs m, buildInputs lr, buildInputs logbase, buildInputs sign_decay, buildInputs beta, buildInputs grad] buildOp [] (opDef "ResourceApplyPowerSign" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "m" type: DT_RESOURCE } input_arg { name: "lr" type_attr: "T" } input_arg { name: "logbase" type_attr: "T" } input_arg { name: "sign_decay" type_attr: "T" } input_arg { name: "beta" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | resourceApplyProximalAdagrad :: forall v'1 v'2 v'3 v'4 v'5 v'6 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __l1__ -> Tensor v'5 t -- ^ __l2__ -> Tensor v'6 t -- ^ __grad__ -> m' (ControlNode) resourceApplyProximalAdagrad = resourceApplyProximalAdagrad' id resourceApplyProximalAdagrad' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t m' . 
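{- Usage note (added, not generated): this is FOBOS with an Adagrad learning
   rate, per the TensorFlow documentation:
     accum += grad * grad
     prox_v = var - lr * grad / sqrt(accum)
     var    = sign(prox_v) * max(abs(prox_v) - lr * l1, 0) / (1 + lr * l2)
   ResourceApplyProximalGradientDescent below is the same proximal step with a
   fixed learning rate alpha in place of lr / sqrt(accum).
-}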
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __l1__ -> Tensor v'5 t -- ^ __l2__ -> Tensor v'6 t -- ^ __grad__ -> m' (ControlNode) resourceApplyProximalAdagrad' op'options var accum lr l1 l2 grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs grad] buildOp [] (opDef "ResourceApplyProximalAdagrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "accum" type: DT_RESOURCE } input_arg { name: "lr" type_attr: "T" } input_arg { name: "l1" type_attr: "T" } input_arg { name: "l2" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | resourceApplyProximalGradientDescent :: forall v'1 v'2 v'3 v'4 v'5 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 t -- ^ __alpha__ -> Tensor v'3 t -- ^ __l1__ -> Tensor v'4 t -- ^ __l2__ -> Tensor v'5 t -- ^ __delta__ -> m' (ControlNode) resourceApplyProximalGradientDescent = resourceApplyProximalGradientDescent' id resourceApplyProximalGradientDescent' :: forall v'1 v'2 v'3 v'4 v'5 t m' . 
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 t -- ^ __alpha__ -> Tensor v'3 t -- ^ __l1__ -> Tensor v'4 t -- ^ __l2__ -> Tensor v'5 t -- ^ __delta__ -> m' (ControlNode) resourceApplyProximalGradientDescent' op'options var alpha l1 l2 delta | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs alpha, buildInputs l1, buildInputs l2, buildInputs delta] buildOp [] (opDef "ResourceApplyProximalGradientDescent" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "alpha" type_attr: "T" } input_arg { name: "l1" type_attr: "T" } input_arg { name: "l2" type_attr: "T" } input_arg { name: "delta" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | resourceApplyRMSProp :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 t m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __ms__ -> Tensor v'3 ResourceHandle -- ^ __mom__ -> Tensor v'4 t -- ^ __lr__ -> Tensor v'5 t -- ^ __rho__ -> Tensor v'6 t -- ^ __momentum__ -> Tensor v'7 t -- ^ __epsilon__ -> Tensor v'8 t -- ^ __grad__ -> m' (ControlNode) resourceApplyRMSProp = resourceApplyRMSProp' id resourceApplyRMSProp' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 t m' . 
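{- Usage note (added, not generated): per the TensorFlow documentation the
   RMSProp update is
     ms   = rho * ms + (1 - rho) * grad * grad
     mom  = momentum * mom + lr * grad / sqrt(ms + epsilon)
     var -= mom
-}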
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __ms__ -> Tensor v'3 ResourceHandle -- ^ __mom__ -> Tensor v'4 t -- ^ __lr__ -> Tensor v'5 t -- ^ __rho__ -> Tensor v'6 t -- ^ __momentum__ -> Tensor v'7 t -- ^ __epsilon__ -> Tensor v'8 t -- ^ __grad__ -> m' (ControlNode) resourceApplyRMSProp' op'options var ms mom lr rho momentum epsilon grad | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs ms, buildInputs mom, buildInputs lr, buildInputs rho, buildInputs momentum, buildInputs epsilon, buildInputs grad] buildOp [] (opDef "ResourceApplyRMSProp" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "ms" type: DT_RESOURCE } input_arg { name: "mom" type: DT_RESOURCE } input_arg { name: "lr" type_attr: "T" } input_arg { name: "rho" type_attr: "T" } input_arg { name: "momentum" type_attr: "T" } input_arg { name: "epsilon" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | resourceCountUpTo :: forall v'1 t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Data.Int.Int64 -- ^ __limit__ -> Tensor v'1 ResourceHandle -- ^ __resource__ -> m' (Tensor Value t) -- ^ __output__ resourceCountUpTo = resourceCountUpTo' id resourceCountUpTo' :: forall v'1 t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Data.Int.Int64 -- ^ __limit__ -> Tensor v'1 ResourceHandle -- ^ __resource__ -> m' (Tensor Value t) -- ^ __output__ resourceCountUpTo' op'options limit resource | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource] buildOp [] (opDef "ResourceCountUpTo" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "limit" .~ limit & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } output_arg { name: "output" type_attr: "T" } attr { name: "limit" type: "int" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | resourceGather :: forall v'1 v'2 dtype tindices m' . (MonadBuild m', TensorType dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 tindices -- ^ __indices__ -> m' (Tensor Value dtype) -- ^ __output__ resourceGather = resourceGather' id resourceGather' :: forall v'1 v'2 dtype tindices m' . 
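{- Usage note (added, not generated): resourceCountUpTo increments the
   referenced scalar by one and returns a copy of its value from before the
   increment; once the counter reaches `limit` the op fails with an OutOfRange
   error. The element type is pinned by an annotation; a sketch (assuming a
   TensorFlow.Session monad and a counter handle built elsewhere):

     n <- resourceCountUpTo 100 counter
            :: Session (Tensor Value Data.Int.Int32)
-}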
(MonadBuild m', TensorType dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 tindices -- ^ __indices__ -> m' (Tensor Value dtype) -- ^ __output__ resourceGather' op'options resource indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs indices] buildOp [] (opDef "ResourceGather" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } input_arg { name: "indices" type_attr: "Tindices" } output_arg { name: "output" type_attr: "dtype" } attr { name: "batch_dims" type: "int" default_value { i: 0 } } attr { name: "validate_indices" type: "bool" default_value { b: true } } attr { name: "dtype" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | resourceGatherNd :: forall v'1 v'2 dtype tindices m' . (MonadBuild m', TensorType dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 tindices -- ^ __indices__ -> m' (Tensor Value dtype) -- ^ __output__ resourceGatherNd = resourceGatherNd' id resourceGatherNd' :: forall v'1 v'2 dtype tindices m' . (MonadBuild m', TensorType dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 tindices -- ^ __indices__ -> m' (Tensor Value dtype) -- ^ __output__ resourceGatherNd' op'options resource indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs indices] buildOp [] (opDef "ResourceGatherNd" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } input_arg { name: "indices" type_attr: "Tindices" } output_arg { name: "output" type_attr: "dtype" } attr { name: "dtype" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | resourceScatterAdd :: forall v'1 v'2 v'3 dtype tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 dtype -- ^ __updates__ -> m' (ControlNode) resourceScatterAdd = resourceScatterAdd' id resourceScatterAdd' :: forall v'1 v'2 v'3 dtype tindices m' . 
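{- Usage note (added, not generated): the op definition above also exposes
   "batch_dims" and "validate_indices", which have no positional argument and
   are set through OpParams; a minimal sketch gathering with one batch
   dimension (assuming `resource` and `indices` are built elsewhere):

     resourceGather' (opAttr "batch_dims" .~ (1 :: Data.Int.Int64))
                     resource indices
-}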
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 dtype -- ^ __updates__ -> m' (ControlNode) resourceScatterAdd' op'options resource indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs indices, buildInputs updates] buildOp [] (opDef "ResourceScatterAdd" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "dtype" } attr { name: "dtype" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | resourceScatterDiv :: forall v'1 v'2 v'3 dtype tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 dtype -- ^ __updates__ -> m' (ControlNode) resourceScatterDiv = resourceScatterDiv' id resourceScatterDiv' :: forall v'1 v'2 v'3 dtype tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 dtype -- ^ __updates__ -> m' (ControlNode) resourceScatterDiv' op'options resource indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs indices, buildInputs updates] buildOp [] (opDef "ResourceScatterDiv" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "dtype" } attr { name: "dtype" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | resourceScatterMax :: forall v'1 v'2 v'3 dtype tindices m' . 
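{- Usage note (added, not generated): the elementwise resourceScatter* family
   applies
     var[indices[i], ...] <op>= updates[i, ...]
   for each i. Per the TensorFlow documentation duplicate indices combine with
   the op itself (contributions add for Add, multiply for Mul, divide for Div,
   and take the extremum for Max/Min), so the result is well defined even with
   repeated indices.
-}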
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 dtype -- ^ __updates__ -> m' (ControlNode) resourceScatterMax = resourceScatterMax' id resourceScatterMax' :: forall v'1 v'2 v'3 dtype tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 dtype -- ^ __updates__ -> m' (ControlNode) resourceScatterMax' op'options resource indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs indices, buildInputs updates] buildOp [] (opDef "ResourceScatterMax" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "dtype" } attr { name: "dtype" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | resourceScatterMin :: forall v'1 v'2 v'3 dtype tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 dtype -- ^ __updates__ -> m' (ControlNode) resourceScatterMin = resourceScatterMin' id resourceScatterMin' :: forall v'1 v'2 v'3 dtype tindices m' . 
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 dtype -- ^ __updates__ -> m' (ControlNode) resourceScatterMin' op'options resource indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs indices, buildInputs updates] buildOp [] (opDef "ResourceScatterMin" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "dtype" } attr { name: "dtype" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | resourceScatterMul :: forall v'1 v'2 v'3 dtype tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 dtype -- ^ __updates__ -> m' (ControlNode) resourceScatterMul = resourceScatterMul' id resourceScatterMul' :: forall v'1 v'2 v'3 dtype tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 dtype -- ^ __updates__ -> m' (ControlNode) resourceScatterMul' op'options resource indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs indices, buildInputs updates] buildOp [] (opDef "ResourceScatterMul" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "dtype" } attr { name: "dtype" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | resourceScatterNdAdd :: forall v'1 v'2 v'3 t tindices m' . 
(MonadBuild m', TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (ControlNode) resourceScatterNdAdd = resourceScatterNdAdd' id resourceScatterNdAdd' :: forall v'1 v'2 v'3 t tindices m' . (MonadBuild m', TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (ControlNode) resourceScatterNdAdd' op'options ref indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs indices, buildInputs updates] buildOp [] (opDef "ResourceScatterNdAdd" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" type: DT_RESOURCE } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: true } } -} -- | resourceScatterNdSub :: forall v'1 v'2 v'3 t tindices m' . (MonadBuild m', TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (ControlNode) resourceScatterNdSub = resourceScatterNdSub' id resourceScatterNdSub' :: forall v'1 v'2 v'3 t tindices m' . (MonadBuild m', TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (ControlNode) resourceScatterNdSub' op'options ref indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs indices, buildInputs updates] buildOp [] (opDef "ResourceScatterNdSub" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" type: DT_RESOURCE } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: true } } -} -- | resourceScatterNdUpdate :: forall v'1 v'2 v'3 t tindices m' . (MonadBuild m', TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (ControlNode) resourceScatterNdUpdate = resourceScatterNdUpdate' id resourceScatterNdUpdate' :: forall v'1 v'2 v'3 t tindices m' . 
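{- Usage note (added, not generated): the resourceScatterNd* ops take indices
   of shape [..., K], where each row is a K-dimensional index tuple addressing
   an element (K equal to the rank of the variable) or a slice (K less than
   the rank). Note that their "use_locking" attribute defaults to true in the
   op definitions, unlike the elementwise scatter ops above.
-}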
(MonadBuild m', TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (ControlNode) resourceScatterNdUpdate' op'options ref indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs indices, buildInputs updates] buildOp [] (opDef "ResourceScatterNdUpdate" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" type: DT_RESOURCE } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: true } } -} -- | resourceScatterSub :: forall v'1 v'2 v'3 dtype tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 dtype -- ^ __updates__ -> m' (ControlNode) resourceScatterSub = resourceScatterSub' id resourceScatterSub' :: forall v'1 v'2 v'3 dtype tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 dtype -- ^ __updates__ -> m' (ControlNode) resourceScatterSub' op'options resource indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs indices, buildInputs updates] buildOp [] (opDef "ResourceScatterSub" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "dtype" } attr { name: "dtype" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | resourceScatterUpdate :: forall v'1 v'2 v'3 dtype tindices m' . (MonadBuild m', TensorType dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 dtype -- ^ __updates__ -> m' (ControlNode) resourceScatterUpdate = resourceScatterUpdate' id resourceScatterUpdate' :: forall v'1 v'2 v'3 dtype tindices m' . 
(MonadBuild m', TensorType dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 dtype -- ^ __updates__ -> m' (ControlNode) resourceScatterUpdate' op'options resource indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs indices, buildInputs updates] buildOp [] (opDef "ResourceScatterUpdate" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "dtype" } attr { name: "dtype" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | resourceSparseApplyAdadelta :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 ResourceHandle -- ^ __accum_update__ -> Tensor v'4 t -- ^ __lr__ -> Tensor v'5 t -- ^ __rho__ -> Tensor v'6 t -- ^ __epsilon__ -> Tensor v'7 t -- ^ __grad__ -> Tensor v'8 tindices -- ^ __indices__ -> m' (ControlNode) resourceSparseApplyAdadelta = resourceSparseApplyAdadelta' id resourceSparseApplyAdadelta' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 ResourceHandle -- ^ __accum_update__ -> Tensor v'4 t -- ^ __lr__ -> Tensor v'5 t -- ^ __rho__ -> Tensor v'6 t -- ^ __epsilon__ -> Tensor v'7 t -- ^ __grad__ -> Tensor v'8 tindices -- ^ __indices__ -> m' (ControlNode) resourceSparseApplyAdadelta' op'options var accum accum_update lr rho epsilon grad indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs accum_update, buildInputs lr, buildInputs rho, buildInputs epsilon, buildInputs grad, buildInputs indices] buildOp [] (opDef "ResourceSparseApplyAdadelta" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "accum" type: DT_RESOURCE } input_arg { name: "accum_update" type: DT_RESOURCE } input_arg { name: "lr" type_attr: "T" } input_arg { name: "rho" type_attr: "T" } input_arg { name: "epsilon" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 
type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | resourceSparseApplyAdagrad :: forall v'1 v'2 v'3 v'4 v'5 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 tindices -- ^ __indices__ -> m' (ControlNode) resourceSparseApplyAdagrad = resourceSparseApplyAdagrad' id resourceSparseApplyAdagrad' :: forall v'1 v'2 v'3 v'4 v'5 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 tindices -- ^ __indices__ -> m' (ControlNode) resourceSparseApplyAdagrad' op'options var accum lr grad indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs lr, buildInputs grad, buildInputs indices] buildOp [] (opDef "ResourceSparseApplyAdagrad" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "accum" type: DT_RESOURCE } input_arg { name: "lr" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } attr { name: "update_slots" type: "bool" default_value { b: true } } -} -- | resourceSparseApplyAdagradDA :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t tindices m' . 
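{- Usage note (added, not generated): the resourceSparseApply* variants update
   only the rows of var (and its slot variables) named by `indices`; the first
   dimension of grad must match the length of indices. For sparse Adagrad, per
   the TensorFlow documentation, for each row i in indices:
     accum[i] += grad_row * grad_row
     var[i]   -= lr * grad_row / sqrt(accum[i])
   where grad_row is the corresponding row of grad.
-}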
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __gradient_accumulator__ -> Tensor v'3 ResourceHandle -- ^ __gradient_squared_accumulator__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 tindices -- ^ __indices__ -> Tensor v'6 t -- ^ __lr__ -> Tensor v'7 t -- ^ __l1__ -> Tensor v'8 t -- ^ __l2__ -> Tensor v'9 Data.Int.Int64 -- ^ __global_step__ -> m' (ControlNode) resourceSparseApplyAdagradDA = resourceSparseApplyAdagradDA' id resourceSparseApplyAdagradDA' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __gradient_accumulator__ -> Tensor v'3 ResourceHandle -- ^ __gradient_squared_accumulator__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 tindices -- ^ __indices__ -> Tensor v'6 t -- ^ __lr__ -> Tensor v'7 t -- ^ __l1__ -> Tensor v'8 t -- ^ __l2__ -> Tensor v'9 Data.Int.Int64 -- ^ __global_step__ -> m' (ControlNode) resourceSparseApplyAdagradDA' op'options var gradient_accumulator gradient_squared_accumulator grad indices lr l1 l2 global_step | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs gradient_accumulator, buildInputs gradient_squared_accumulator, buildInputs grad, buildInputs indices, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs global_step] buildOp [] (opDef "ResourceSparseApplyAdagradDA" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "gradient_accumulator" type: DT_RESOURCE } input_arg { name: "gradient_squared_accumulator" type: DT_RESOURCE } input_arg { name: "grad" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "lr" type_attr: "T" } input_arg { name: "l1" type_attr: "T" } input_arg { name: "l2" type_attr: "T" } input_arg { name: "global_step" type: DT_INT64 } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | resourceSparseApplyCenteredRMSProp :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 t tindices m' . 
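{- Usage note (added, not generated): mind the argument order: relative to the
   dense ResourceApplyAdagradDA above, the sparse op inserts `indices` between
   grad and lr; the remaining arguments are unchanged.
-}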
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __mg__ -> Tensor v'3 ResourceHandle -- ^ __ms__ -> Tensor v'4 ResourceHandle -- ^ __mom__ -> Tensor v'5 t -- ^ __lr__ -> Tensor v'6 t -- ^ __rho__ -> Tensor v'7 t -- ^ __momentum__ -> Tensor v'8 t -- ^ __epsilon__ -> Tensor v'9 t -- ^ __grad__ -> Tensor v'10 tindices -- ^ __indices__ -> m' (ControlNode) resourceSparseApplyCenteredRMSProp = resourceSparseApplyCenteredRMSProp' id resourceSparseApplyCenteredRMSProp' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __mg__ -> Tensor v'3 ResourceHandle -- ^ __ms__ -> Tensor v'4 ResourceHandle -- ^ __mom__ -> Tensor v'5 t -- ^ __lr__ -> Tensor v'6 t -- ^ __rho__ -> Tensor v'7 t -- ^ __momentum__ -> Tensor v'8 t -- ^ __epsilon__ -> Tensor v'9 t -- ^ __grad__ -> Tensor v'10 tindices -- ^ __indices__ -> m' (ControlNode) resourceSparseApplyCenteredRMSProp' op'options var mg ms mom lr rho momentum epsilon grad indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs mg, buildInputs ms, buildInputs mom, buildInputs lr, buildInputs rho, buildInputs momentum, buildInputs epsilon, buildInputs grad, buildInputs indices] buildOp [] (opDef "ResourceSparseApplyCenteredRMSProp" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "mg" type: DT_RESOURCE } input_arg { name: "ms" type: DT_RESOURCE } input_arg { name: "mom" type: DT_RESOURCE } input_arg { name: "lr" type_attr: "T" } input_arg { name: "rho" type_attr: "T" } input_arg { name: "momentum" type_attr: "T" } input_arg { name: "epsilon" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | resourceSparseApplyFtrl :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t tindices m' . 
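-- Sketch of supplying the scalar hyperparameters with 'TensorFlow.Ops.scalar'
-- (an assumed import from the wider tensorflow package; the handle and
-- gradient bindings are hypothetical). The non-centered
-- 'resourceSparseApplyRMSProp' further below takes the same arguments minus
-- the @mg@ handle.
--
-- > import qualified TensorFlow.Ops as Ops
-- >
-- > centeredStep = resourceSparseApplyCenteredRMSProp var mg ms mom
-- >                  (Ops.scalar (0.001 :: Float))  -- lr
-- >                  (Ops.scalar 0.9)               -- rho
-- >                  (Ops.scalar 0.0)               -- momentum
-- >                  (Ops.scalar 1e-10)             -- epsilon
-- >                  grad indices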
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 ResourceHandle -- ^ __linear__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 tindices -- ^ __indices__ -> Tensor v'6 t -- ^ __lr__ -> Tensor v'7 t -- ^ __l1__ -> Tensor v'8 t -- ^ __l2__ -> Tensor v'9 t -- ^ __lr_power__ -> m' (ControlNode) resourceSparseApplyFtrl = resourceSparseApplyFtrl' id resourceSparseApplyFtrl' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 ResourceHandle -- ^ __linear__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 tindices -- ^ __indices__ -> Tensor v'6 t -- ^ __lr__ -> Tensor v'7 t -- ^ __l1__ -> Tensor v'8 t -- ^ __l2__ -> Tensor v'9 t -- ^ __lr_power__ -> m' (ControlNode) resourceSparseApplyFtrl' op'options var accum linear grad indices lr l1 l2 lr_power | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs linear, buildInputs grad, buildInputs indices, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs lr_power] buildOp [] (opDef "ResourceSparseApplyFtrl" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "accum" type: DT_RESOURCE } input_arg { name: "linear" type: DT_RESOURCE } input_arg { name: "grad" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "lr" type_attr: "T" } input_arg { name: "l1" type_attr: "T" } input_arg { name: "l2" type_attr: "T" } input_arg { name: "lr_power" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | resourceSparseApplyFtrlV2 :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 t tindices m' . 
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 ResourceHandle -- ^ __linear__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 tindices -- ^ __indices__ -> Tensor v'6 t -- ^ __lr__ -> Tensor v'7 t -- ^ __l1__ -> Tensor v'8 t -- ^ __l2__ -> Tensor v'9 t -- ^ __l2_shrinkage__ -> Tensor v'10 t -- ^ __lr_power__ -> m' (ControlNode) resourceSparseApplyFtrlV2 = resourceSparseApplyFtrlV2' id resourceSparseApplyFtrlV2' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 v'10 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 ResourceHandle -- ^ __linear__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 tindices -- ^ __indices__ -> Tensor v'6 t -- ^ __lr__ -> Tensor v'7 t -- ^ __l1__ -> Tensor v'8 t -- ^ __l2__ -> Tensor v'9 t -- ^ __l2_shrinkage__ -> Tensor v'10 t -- ^ __lr_power__ -> m' (ControlNode) resourceSparseApplyFtrlV2' op'options var accum linear grad indices lr l1 l2 l2_shrinkage lr_power | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs linear, buildInputs grad, buildInputs indices, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs l2_shrinkage, buildInputs lr_power] buildOp [] (opDef "ResourceSparseApplyFtrlV2" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "accum" type: DT_RESOURCE } input_arg { name: "linear" type: DT_RESOURCE } input_arg { name: "grad" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "lr" type_attr: "T" } input_arg { name: "l1" type_attr: "T" } input_arg { name: "l2" type_attr: "T" } input_arg { name: "l2_shrinkage" type_attr: "T" } input_arg { name: "lr_power" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | resourceSparseApplyKerasMomentum :: forall v'1 v'2 v'3 v'4 v'5 v'6 t tindices m' . 
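-- Both FTRL variants take the gradient and its indices *before* the
-- hyperparameters, and the V2 op inserts @l2_shrinkage@ just ahead of
-- @lr_power@. A side-by-side sketch (bindings hypothetical):
--
-- > ftrlStep   = resourceSparseApplyFtrl   var accum linear grad indices lr l1 l2 lrPower
-- > ftrlV2Step = resourceSparseApplyFtrlV2 var accum linear grad indices lr l1 l2 l2Shrinkage lrPower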
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 tindices -- ^ __indices__ -> Tensor v'6 t -- ^ __momentum__ -> m' (ControlNode) resourceSparseApplyKerasMomentum = resourceSparseApplyKerasMomentum' id resourceSparseApplyKerasMomentum' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 tindices -- ^ __indices__ -> Tensor v'6 t -- ^ __momentum__ -> m' (ControlNode) resourceSparseApplyKerasMomentum' op'options var accum lr grad indices momentum | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs lr, buildInputs grad, buildInputs indices, buildInputs momentum] buildOp [] (opDef "ResourceSparseApplyKerasMomentum" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "accum" type: DT_RESOURCE } input_arg { name: "lr" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "momentum" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } attr { name: "use_nesterov" type: "bool" default_value { b: false } } -} -- | resourceSparseApplyMomentum :: forall v'1 v'2 v'3 v'4 v'5 v'6 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 tindices -- ^ __indices__ -> Tensor v'6 t -- ^ __momentum__ -> m' (ControlNode) resourceSparseApplyMomentum = resourceSparseApplyMomentum' id resourceSparseApplyMomentum' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t tindices m' . 
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 tindices -- ^ __indices__ -> Tensor v'6 t -- ^ __momentum__ -> m' (ControlNode) resourceSparseApplyMomentum' op'options var accum lr grad indices momentum | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs lr, buildInputs grad, buildInputs indices, buildInputs momentum] buildOp [] (opDef "ResourceSparseApplyMomentum" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "accum" type: DT_RESOURCE } input_arg { name: "lr" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "momentum" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } attr { name: "use_nesterov" type: "bool" default_value { b: false } } -} -- | resourceSparseApplyProximalAdagrad :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __l1__ -> Tensor v'5 t -- ^ __l2__ -> Tensor v'6 t -- ^ __grad__ -> Tensor v'7 tindices -- ^ __indices__ -> m' (ControlNode) resourceSparseApplyProximalAdagrad = resourceSparseApplyProximalAdagrad' id resourceSparseApplyProximalAdagrad' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t tindices m' . 
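-- 'resourceSparseApplyKerasMomentum' and 'resourceSparseApplyMomentum' share
-- one argument order (var, accum, lr, grad, indices, momentum), and both
-- declare a @use_nesterov@ attribute that can be set via 'OpParams'. Sketch
-- (bindings hypothetical):
--
-- > nesterovStep = resourceSparseApplyMomentum'
-- >                  (opAttr "use_nesterov" .~ True)
-- >                  var accum lr grad indices momentum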
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __accum__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __l1__ -> Tensor v'5 t -- ^ __l2__ -> Tensor v'6 t -- ^ __grad__ -> Tensor v'7 tindices -- ^ __indices__ -> m' (ControlNode) resourceSparseApplyProximalAdagrad' op'options var accum lr l1 l2 grad indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs grad, buildInputs indices] buildOp [] (opDef "ResourceSparseApplyProximalAdagrad" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "accum" type: DT_RESOURCE } input_arg { name: "lr" type_attr: "T" } input_arg { name: "l1" type_attr: "T" } input_arg { name: "l2" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | resourceSparseApplyProximalGradientDescent :: forall v'1 v'2 v'3 v'4 v'5 v'6 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 t -- ^ __alpha__ -> Tensor v'3 t -- ^ __l1__ -> Tensor v'4 t -- ^ __l2__ -> Tensor v'5 t -- ^ __grad__ -> Tensor v'6 tindices -- ^ __indices__ -> m' (ControlNode) resourceSparseApplyProximalGradientDescent = resourceSparseApplyProximalGradientDescent' id resourceSparseApplyProximalGradientDescent' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t tindices m' . 
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 t -- ^ __alpha__ -> Tensor v'3 t -- ^ __l1__ -> Tensor v'4 t -- ^ __l2__ -> Tensor v'5 t -- ^ __grad__ -> Tensor v'6 tindices -- ^ __indices__ -> m' (ControlNode) resourceSparseApplyProximalGradientDescent' op'options var alpha l1 l2 grad indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs alpha, buildInputs l1, buildInputs l2, buildInputs grad, buildInputs indices] buildOp [] (opDef "ResourceSparseApplyProximalGradientDescent" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "alpha" type_attr: "T" } input_arg { name: "l1" type_attr: "T" } input_arg { name: "l2" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | resourceSparseApplyRMSProp :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __ms__ -> Tensor v'3 ResourceHandle -- ^ __mom__ -> Tensor v'4 t -- ^ __lr__ -> Tensor v'5 t -- ^ __rho__ -> Tensor v'6 t -- ^ __momentum__ -> Tensor v'7 t -- ^ __epsilon__ -> Tensor v'8 t -- ^ __grad__ -> Tensor v'9 tindices -- ^ __indices__ -> m' (ControlNode) resourceSparseApplyRMSProp = resourceSparseApplyRMSProp' id resourceSparseApplyRMSProp' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 v'8 v'9 t tindices m' . 
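-- Of the two proximal ops, only the Adagrad form keeps an accumulator; the
-- plain proximal gradient descent op takes the step size @alpha@ directly.
-- Sketch (bindings hypothetical):
--
-- > proxAdagradStep = resourceSparseApplyProximalAdagrad var accum lr l1 l2 grad indices
-- > proxSgdStep     = resourceSparseApplyProximalGradientDescent var alpha l1 l2 grad indices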
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 ResourceHandle -- ^ __var__ -> Tensor v'2 ResourceHandle -- ^ __ms__ -> Tensor v'3 ResourceHandle -- ^ __mom__ -> Tensor v'4 t -- ^ __lr__ -> Tensor v'5 t -- ^ __rho__ -> Tensor v'6 t -- ^ __momentum__ -> Tensor v'7 t -- ^ __epsilon__ -> Tensor v'8 t -- ^ __grad__ -> Tensor v'9 tindices -- ^ __indices__ -> m' (ControlNode) resourceSparseApplyRMSProp' op'options var ms mom lr rho momentum epsilon grad indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs ms, buildInputs mom, buildInputs lr, buildInputs rho, buildInputs momentum, buildInputs epsilon, buildInputs grad, buildInputs indices] buildOp [] (opDef "ResourceSparseApplyRMSProp" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type: DT_RESOURCE } input_arg { name: "ms" type: DT_RESOURCE } input_arg { name: "mom" type: DT_RESOURCE } input_arg { name: "lr" type_attr: "T" } input_arg { name: "rho" type_attr: "T" } input_arg { name: "momentum" type_attr: "T" } input_arg { name: "epsilon" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | resourceStridedSliceAssign :: forall v'1 v'2 v'3 v'4 v'5 t index m' . (MonadBuild m', TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] index) => Tensor v'1 ResourceHandle -- ^ __ref__ -> Tensor v'2 index -- ^ __begin__ -> Tensor v'3 index -- ^ __end__ -> Tensor v'4 index -- ^ __strides__ -> Tensor v'5 t -- ^ __value__ -> m' (ControlNode) resourceStridedSliceAssign = resourceStridedSliceAssign' id resourceStridedSliceAssign' :: forall v'1 v'2 v'3 v'4 v'5 t index m' . 
(MonadBuild m', TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] index) => OpParams -> Tensor v'1 ResourceHandle -- ^ __ref__ -> Tensor v'2 index -- ^ __begin__ -> Tensor v'3 index -- ^ __end__ -> Tensor v'4 index -- ^ __strides__ -> Tensor v'5 t -- ^ __value__ -> m' (ControlNode) resourceStridedSliceAssign' op'options ref begin end strides value | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs begin, buildInputs end, buildInputs strides, buildInputs value] buildOp [] (opDef "ResourceStridedSliceAssign" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Index" .~ tensorType (undefined :: index) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" type: DT_RESOURCE } input_arg { name: "begin" type_attr: "Index" } input_arg { name: "end" type_attr: "Index" } input_arg { name: "strides" type_attr: "Index" } input_arg { name: "value" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Index" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "begin_mask" type: "int" default_value { i: 0 } } attr { name: "end_mask" type: "int" default_value { i: 0 } } attr { name: "ellipsis_mask" type: "int" default_value { i: 0 } } attr { name: "new_axis_mask" type: "int" default_value { i: 0 } } attr { name: "shrink_axis_mask" type: "int" default_value { i: 0 } } -} -- | restore :: forall v'1 v'2 dt m' . (MonadBuild m', TensorType dt) => Tensor v'1 Data.ByteString.ByteString -- ^ __file_pattern__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_name__ -> m' (Tensor Value dt) -- ^ __tensor__ restore = restore' id restore' :: forall v'1 v'2 dt m' . (MonadBuild m', TensorType dt) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __file_pattern__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_name__ -> m' (Tensor Value dt) -- ^ __tensor__ restore' op'options file_pattern tensor_name | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs file_pattern, buildInputs tensor_name] buildOp [] (opDef "Restore" & opAttr "dt" .~ tensorType (undefined :: dt) & op'options & opInputs .~ op'inputs) {- input_arg { name: "file_pattern" type: DT_STRING } input_arg { name: "tensor_name" type: DT_STRING } output_arg { name: "tensor" type_attr: "dt" } attr { name: "dt" type: "type" } attr { name: "preferred_shard" type: "int" default_value { i: -1 } } -} -- | restoreSlice :: forall v'1 v'2 v'3 dt m' . (MonadBuild m', TensorType dt) => Tensor v'1 Data.ByteString.ByteString -- ^ __file_pattern__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_name__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __shape_and_slice__ -> m' (Tensor Value dt) -- ^ __tensor__ restoreSlice = restoreSlice' id restoreSlice' :: forall v'1 v'2 v'3 dt m' . 
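-- The mask attributes of 'resourceStridedSliceAssign' mirror those of
-- StridedSlice; e.g. shrinking the first axis to assign a single row of a
-- matrix variable. Int attrs carry 'Data.Int.Int64' values (bindings
-- hypothetical):
--
-- > assignRow = resourceStridedSliceAssign'
-- >               (opAttr "shrink_axis_mask" .~ (1 :: Data.Int.Int64))
-- >               ref begin end strides value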
(MonadBuild m', TensorType dt) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __file_pattern__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_name__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __shape_and_slice__ -> m' (Tensor Value dt) -- ^ __tensor__ restoreSlice' op'options file_pattern tensor_name shape_and_slice | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs file_pattern, buildInputs tensor_name, buildInputs shape_and_slice] buildOp [] (opDef "RestoreSlice" & opAttr "dt" .~ tensorType (undefined :: dt) & op'options & opInputs .~ op'inputs) {- input_arg { name: "file_pattern" type: DT_STRING } input_arg { name: "tensor_name" type: DT_STRING } input_arg { name: "shape_and_slice" type: DT_STRING } output_arg { name: "tensor" type_attr: "dt" } attr { name: "dt" type: "type" } attr { name: "preferred_shard" type: "int" default_value { i: -1 } } -} -- | restoreV2 :: forall v'1 v'2 v'3 dtypes m' . (MonadBuild m', TensorTypes dtypes) => Tensor v'1 Data.ByteString.ByteString -- ^ __prefix__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_names__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __shape_and_slices__ -> m' (TensorList (Value) dtypes) -- ^ __tensors__ restoreV2 = restoreV2' id restoreV2' :: forall v'1 v'2 v'3 dtypes m' . (MonadBuild m', TensorTypes dtypes) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __prefix__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_names__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __shape_and_slices__ -> m' (TensorList (Value) dtypes) -- ^ __tensors__ restoreV2' op'options prefix tensor_names shape_and_slices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs prefix, buildInputs tensor_names, buildInputs shape_and_slices] buildOp [] (opDef "RestoreV2" & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes) & op'options & opInputs .~ op'inputs) {- input_arg { name: "prefix" type: DT_STRING } input_arg { name: "tensor_names" type: DT_STRING } input_arg { name: "shape_and_slices" type: DT_STRING } output_arg { name: "tensors" type_list_attr: "dtypes" } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } -} -- | retrieveTPUEmbeddingADAMParameters :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __momenta__, __velocities__) -- -- * __parameters__ -- -- * __momenta__ -- -- * __velocities__ retrieveTPUEmbeddingADAMParameters = retrieveTPUEmbeddingADAMParameters' id retrieveTPUEmbeddingADAMParameters' :: forall m' . 
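-- For 'restoreV2' the @dtypes@ attribute is derived from the result type, so
-- an annotation on the returned 'TensorList' is usually needed to pin it
-- down. Sketch (the string-tensor bindings are hypothetical):
--
-- > restored :: MonadBuild m => m (TensorList Value '[Float, Data.Int.Int64])
-- > restored = restoreV2 prefix tensorNames shapeAndSlices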
(MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __momenta__, __velocities__) -- -- * __parameters__ -- -- * __momenta__ -- -- * __velocities__ retrieveTPUEmbeddingADAMParameters' op'options num_shards shard_id | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "RetrieveTPUEmbeddingADAMParameters" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- output_arg { name: "parameters" type: DT_FLOAT } output_arg { name: "momenta" type: DT_FLOAT } output_arg { name: "velocities" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | retrieveTPUEmbeddingADAMParametersGradAccumDebug :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __momenta__, __velocities__, __gradient_accumulators__) -- -- * __parameters__ -- -- * __momenta__ -- -- * __velocities__ -- -- * __gradient_accumulators__ retrieveTPUEmbeddingADAMParametersGradAccumDebug = retrieveTPUEmbeddingADAMParametersGradAccumDebug' id retrieveTPUEmbeddingADAMParametersGradAccumDebug' :: forall m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __momenta__, __velocities__, __gradient_accumulators__) -- -- * __parameters__ -- -- * __momenta__ -- -- * __velocities__ -- -- * __gradient_accumulators__ retrieveTPUEmbeddingADAMParametersGradAccumDebug' op'options num_shards shard_id | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "RetrieveTPUEmbeddingADAMParametersGradAccumDebug" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- output_arg { name: "parameters" type: DT_FLOAT } output_arg { name: "momenta" type: DT_FLOAT } output_arg { name: "velocities" type: DT_FLOAT } output_arg { name: "gradient_accumulators" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | retrieveTPUEmbeddingAdadeltaParameters :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __accumulators__, __updates__) -- -- * __parameters__ -- -- * __accumulators__ -- -- * __updates__ retrieveTPUEmbeddingAdadeltaParameters = retrieveTPUEmbeddingAdadeltaParameters' id retrieveTPUEmbeddingAdadeltaParameters' :: forall m' . 
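-- Each RetrieveTPUEmbedding* op below identifies its table either by
-- @table_id@ or by @table_name@; string attrs carry
-- 'Data.ByteString.ByteString' values. Sketch in some 'MonadBuild' do-block
-- (the table name and shard bindings are hypothetical):
--
-- > (params, momenta, velocities) <-
-- >   retrieveTPUEmbeddingADAMParameters'
-- >     (opAttr "table_name" .~ ("emb0" :: Data.ByteString.ByteString))
-- >     numShards shardId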
(MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __accumulators__, __updates__) -- -- * __parameters__ -- -- * __accumulators__ -- -- * __updates__ retrieveTPUEmbeddingAdadeltaParameters' op'options num_shards shard_id | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "RetrieveTPUEmbeddingAdadeltaParameters" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- output_arg { name: "parameters" type: DT_FLOAT } output_arg { name: "accumulators" type: DT_FLOAT } output_arg { name: "updates" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | retrieveTPUEmbeddingAdadeltaParametersGradAccumDebug :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __accumulators__, __updates__, __gradient_accumulators__) -- -- * __parameters__ -- -- * __accumulators__ -- -- * __updates__ -- -- * __gradient_accumulators__ retrieveTPUEmbeddingAdadeltaParametersGradAccumDebug = retrieveTPUEmbeddingAdadeltaParametersGradAccumDebug' id retrieveTPUEmbeddingAdadeltaParametersGradAccumDebug' :: forall m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __accumulators__, __updates__, __gradient_accumulators__) -- -- * __parameters__ -- -- * __accumulators__ -- -- * __updates__ -- -- * __gradient_accumulators__ retrieveTPUEmbeddingAdadeltaParametersGradAccumDebug' op'options num_shards shard_id | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- output_arg { name: "parameters" type: DT_FLOAT } output_arg { name: "accumulators" type: DT_FLOAT } output_arg { name: "updates" type: DT_FLOAT } output_arg { name: "gradient_accumulators" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | retrieveTPUEmbeddingAdagradParameters :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __accumulators__) -- -- * __parameters__ -- -- * __accumulators__ retrieveTPUEmbeddingAdagradParameters = retrieveTPUEmbeddingAdagradParameters' id retrieveTPUEmbeddingAdagradParameters' :: forall m' . 
(MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __accumulators__) -- -- * __parameters__ -- -- * __accumulators__ retrieveTPUEmbeddingAdagradParameters' op'options num_shards shard_id | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "RetrieveTPUEmbeddingAdagradParameters" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- output_arg { name: "parameters" type: DT_FLOAT } output_arg { name: "accumulators" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | retrieveTPUEmbeddingAdagradParametersGradAccumDebug :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __accumulators__, __gradient_accumulators__) -- -- * __parameters__ -- -- * __accumulators__ -- -- * __gradient_accumulators__ retrieveTPUEmbeddingAdagradParametersGradAccumDebug = retrieveTPUEmbeddingAdagradParametersGradAccumDebug' id retrieveTPUEmbeddingAdagradParametersGradAccumDebug' :: forall m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __accumulators__, __gradient_accumulators__) -- -- * __parameters__ -- -- * __accumulators__ -- -- * __gradient_accumulators__ retrieveTPUEmbeddingAdagradParametersGradAccumDebug' op'options num_shards shard_id | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "RetrieveTPUEmbeddingAdagradParametersGradAccumDebug" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- output_arg { name: "parameters" type: DT_FLOAT } output_arg { name: "accumulators" type: DT_FLOAT } output_arg { name: "gradient_accumulators" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | retrieveTPUEmbeddingCenteredRMSPropParameters :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __ms__, __mom__, __mg__) -- -- * __parameters__ -- -- * __ms__ -- -- * __mom__ -- -- * __mg__ retrieveTPUEmbeddingCenteredRMSPropParameters = retrieveTPUEmbeddingCenteredRMSPropParameters' id retrieveTPUEmbeddingCenteredRMSPropParameters' :: forall m' . 
(MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __ms__, __mom__, __mg__) -- -- * __parameters__ -- -- * __ms__ -- -- * __mom__ -- -- * __mg__ retrieveTPUEmbeddingCenteredRMSPropParameters' op'options num_shards shard_id | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "RetrieveTPUEmbeddingCenteredRMSPropParameters" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- output_arg { name: "parameters" type: DT_FLOAT } output_arg { name: "ms" type: DT_FLOAT } output_arg { name: "mom" type: DT_FLOAT } output_arg { name: "mg" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | retrieveTPUEmbeddingFTRLParameters :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __accumulators__, __linears__) -- -- * __parameters__ -- -- * __accumulators__ -- -- * __linears__ retrieveTPUEmbeddingFTRLParameters = retrieveTPUEmbeddingFTRLParameters' id retrieveTPUEmbeddingFTRLParameters' :: forall m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __accumulators__, __linears__) -- -- * __parameters__ -- -- * __accumulators__ -- -- * __linears__ retrieveTPUEmbeddingFTRLParameters' op'options num_shards shard_id | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "RetrieveTPUEmbeddingFTRLParameters" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- output_arg { name: "parameters" type: DT_FLOAT } output_arg { name: "accumulators" type: DT_FLOAT } output_arg { name: "linears" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | retrieveTPUEmbeddingFTRLParametersGradAccumDebug :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __accumulators__, __linears__, __gradient_accumulators__) -- -- * __parameters__ -- -- * __accumulators__ -- -- * __linears__ -- -- * __gradient_accumulators__ retrieveTPUEmbeddingFTRLParametersGradAccumDebug = retrieveTPUEmbeddingFTRLParametersGradAccumDebug' id retrieveTPUEmbeddingFTRLParametersGradAccumDebug' :: forall m' . 
(MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __accumulators__, __linears__, __gradient_accumulators__) -- -- * __parameters__ -- -- * __accumulators__ -- -- * __linears__ -- -- * __gradient_accumulators__ retrieveTPUEmbeddingFTRLParametersGradAccumDebug' op'options num_shards shard_id | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "RetrieveTPUEmbeddingFTRLParametersGradAccumDebug" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- output_arg { name: "parameters" type: DT_FLOAT } output_arg { name: "accumulators" type: DT_FLOAT } output_arg { name: "linears" type: DT_FLOAT } output_arg { name: "gradient_accumulators" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | retrieveTPUEmbeddingMDLAdagradLightParameters :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __accumulators__, __weights__, __benefits__) -- -- * __parameters__ -- -- * __accumulators__ -- -- * __weights__ -- -- * __benefits__ retrieveTPUEmbeddingMDLAdagradLightParameters = retrieveTPUEmbeddingMDLAdagradLightParameters' id retrieveTPUEmbeddingMDLAdagradLightParameters' :: forall m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __accumulators__, __weights__, __benefits__) -- -- * __parameters__ -- -- * __accumulators__ -- -- * __weights__ -- -- * __benefits__ retrieveTPUEmbeddingMDLAdagradLightParameters' op'options num_shards shard_id | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "RetrieveTPUEmbeddingMDLAdagradLightParameters" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- output_arg { name: "parameters" type: DT_FLOAT } output_arg { name: "accumulators" type: DT_FLOAT } output_arg { name: "weights" type: DT_FLOAT } output_arg { name: "benefits" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | retrieveTPUEmbeddingMomentumParameters :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __momenta__) -- -- * __parameters__ -- -- * __momenta__ retrieveTPUEmbeddingMomentumParameters = retrieveTPUEmbeddingMomentumParameters' id retrieveTPUEmbeddingMomentumParameters' :: forall m' . 
(MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __momenta__) -- -- * __parameters__ -- -- * __momenta__ retrieveTPUEmbeddingMomentumParameters' op'options num_shards shard_id | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "RetrieveTPUEmbeddingMomentumParameters" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- output_arg { name: "parameters" type: DT_FLOAT } output_arg { name: "momenta" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | retrieveTPUEmbeddingMomentumParametersGradAccumDebug :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __momenta__, __gradient_accumulators__) -- -- * __parameters__ -- -- * __momenta__ -- -- * __gradient_accumulators__ retrieveTPUEmbeddingMomentumParametersGradAccumDebug = retrieveTPUEmbeddingMomentumParametersGradAccumDebug' id retrieveTPUEmbeddingMomentumParametersGradAccumDebug' :: forall m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __momenta__, __gradient_accumulators__) -- -- * __parameters__ -- -- * __momenta__ -- -- * __gradient_accumulators__ retrieveTPUEmbeddingMomentumParametersGradAccumDebug' op'options num_shards shard_id | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "RetrieveTPUEmbeddingMomentumParametersGradAccumDebug" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- output_arg { name: "parameters" type: DT_FLOAT } output_arg { name: "momenta" type: DT_FLOAT } output_arg { name: "gradient_accumulators" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | retrieveTPUEmbeddingProximalAdagradParameters :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __accumulators__) -- -- * __parameters__ -- -- * __accumulators__ retrieveTPUEmbeddingProximalAdagradParameters = retrieveTPUEmbeddingProximalAdagradParameters' id retrieveTPUEmbeddingProximalAdagradParameters' :: forall m' . 
(MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __accumulators__) -- -- * __parameters__ -- -- * __accumulators__ retrieveTPUEmbeddingProximalAdagradParameters' op'options num_shards shard_id | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "RetrieveTPUEmbeddingProximalAdagradParameters" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- output_arg { name: "parameters" type: DT_FLOAT } output_arg { name: "accumulators" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | retrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __accumulators__, __gradient_accumulators__) -- -- * __parameters__ -- -- * __accumulators__ -- -- * __gradient_accumulators__ retrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug = retrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug' id retrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug' :: forall m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __accumulators__, __gradient_accumulators__) -- -- * __parameters__ -- -- * __accumulators__ -- -- * __gradient_accumulators__ retrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug' op'options num_shards shard_id | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- output_arg { name: "parameters" type: DT_FLOAT } output_arg { name: "accumulators" type: DT_FLOAT } output_arg { name: "gradient_accumulators" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | retrieveTPUEmbeddingRMSPropParameters :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __ms__, __mom__) -- -- * __parameters__ -- -- * __ms__ -- -- * __mom__ retrieveTPUEmbeddingRMSPropParameters = retrieveTPUEmbeddingRMSPropParameters' id retrieveTPUEmbeddingRMSPropParameters' :: forall m' . 
(MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __ms__, __mom__) -- -- * __parameters__ -- -- * __ms__ -- -- * __mom__ retrieveTPUEmbeddingRMSPropParameters' op'options num_shards shard_id | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "RetrieveTPUEmbeddingRMSPropParameters" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- output_arg { name: "parameters" type: DT_FLOAT } output_arg { name: "ms" type: DT_FLOAT } output_arg { name: "mom" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | retrieveTPUEmbeddingRMSPropParametersGradAccumDebug :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __ms__, __mom__, __gradient_accumulators__) -- -- * __parameters__ -- -- * __ms__ -- -- * __mom__ -- -- * __gradient_accumulators__ retrieveTPUEmbeddingRMSPropParametersGradAccumDebug = retrieveTPUEmbeddingRMSPropParametersGradAccumDebug' id retrieveTPUEmbeddingRMSPropParametersGradAccumDebug' :: forall m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' ((Tensor Value Float, Tensor Value Float, Tensor Value Float, Tensor Value Float)) -- ^ (__parameters__, __ms__, __mom__, __gradient_accumulators__) -- -- * __parameters__ -- -- * __ms__ -- -- * __mom__ -- -- * __gradient_accumulators__ retrieveTPUEmbeddingRMSPropParametersGradAccumDebug' op'options num_shards shard_id | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- output_arg { name: "parameters" type: DT_FLOAT } output_arg { name: "ms" type: DT_FLOAT } output_arg { name: "mom" type: DT_FLOAT } output_arg { name: "gradient_accumulators" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | retrieveTPUEmbeddingStochasticGradientDescentParameters :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' (Tensor Value Float) -- ^ __parameters__ retrieveTPUEmbeddingStochasticGradientDescentParameters = retrieveTPUEmbeddingStochasticGradientDescentParameters' id retrieveTPUEmbeddingStochasticGradientDescentParameters' :: forall m' . 
(MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_shards__ -> Data.Int.Int64 -- ^ __shard_id__ -> m' (Tensor Value Float) -- ^ __parameters__ retrieveTPUEmbeddingStochasticGradientDescentParameters' op'options num_shards shard_id | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "RetrieveTPUEmbeddingStochasticGradientDescentParameters" & opAttr "num_shards" .~ num_shards & opAttr "shard_id" .~ shard_id & op'options & opInputs .~ op'inputs) {- output_arg { name: "parameters" type: DT_FLOAT } attr { name: "table_id" type: "int" default_value { i: -1 } has_minimum: true minimum: -1 } attr { name: "table_name" type: "string" default_value { s: "" } } attr { name: "num_shards" type: "int" } attr { name: "shard_id" type: "int" } -} -- | reverse :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __tensor__ -> Tensor v'2 Bool -- ^ __dims__ -> Tensor Build t -- ^ __output__ reverse = reverse' id reverse' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __tensor__ -> Tensor v'2 Bool -- ^ __dims__ -> Tensor Build t -- ^ __output__ reverse' op'options tensor dims | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor, buildInputs dims] return (opDef "Reverse" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" type_attr: "T" } input_arg { name: "dims" type: DT_BOOL } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_UINT16 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_BOOL type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_STRING } } } -} -- | reverseSequence :: forall v'1 v'2 t tlen . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tlen) => Data.Int.Int64 -- ^ __seq_dim__ -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tlen -- ^ __seq_lengths__ -> Tensor Build t -- ^ __output__ reverseSequence = reverseSequence' id reverseSequence' :: forall v'1 v'2 t tlen . 
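-- Sketch of actually fetching retrieved parameters, assuming the
-- 'TensorFlow.Session' and 'Data.Vector' modules from the wider tensorflow
-- package (both are assumptions of this example, not imports of this module):
--
-- > import qualified Data.Vector as V
-- > import qualified TensorFlow.Session as Sess
-- >
-- > fetchSgdParams :: Sess.Session (V.Vector Float)
-- > fetchSgdParams = do
-- >   params <- retrieveTPUEmbeddingStochasticGradientDescentParameters 1 0
-- >   Sess.run params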
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tlen) => OpParams -> Data.Int.Int64 -- ^ __seq_dim__ -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tlen -- ^ __seq_lengths__ -> Tensor Build t -- ^ __output__ reverseSequence' op'options seq_dim input seq_lengths | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs seq_lengths] return (opDef "ReverseSequence" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tlen" .~ tensorType (undefined :: tlen) & opAttr "seq_dim" .~ seq_dim & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "seq_lengths" type_attr: "Tlen" } output_arg { name: "output" type_attr: "T" } attr { name: "seq_dim" type: "int" } attr { name: "batch_dim" type: "int" default_value { i: 0 } } attr { name: "T" type: "type" } attr { name: "Tlen" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | reverseV2 :: forall v'1 v'2 tidx t . (OneOf '[Data.Int.Int32, Data.Int.Int64] tidx, OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __tensor__ -> Tensor v'2 tidx -- ^ __axis__ -> Tensor Build t -- ^ __output__ reverseV2 = reverseV2' id reverseV2' :: forall v'1 v'2 tidx t . (OneOf '[Data.Int.Int32, Data.Int.Int64] tidx, OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __tensor__ -> Tensor v'2 tidx -- ^ __axis__ -> Tensor Build t -- ^ __output__ reverseV2' op'options tensor axis | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor, buildInputs axis] return (opDef "ReverseV2" & opAttr "Tidx" .~ tensorType (undefined :: tidx) & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" type_attr: "T" } input_arg { name: "axis" type_attr: "Tidx" } output_arg { name: "output" type_attr: "T" } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_UINT16 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_BOOL type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_STRING } } } -} -- | rightShift :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ rightShift = rightShift' id rightShift' :: forall v'1 v'2 t . 
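-- 'reverseV2' names the axes to flip by index, in contrast to 'reverse',
-- which takes a boolean mask over all dimensions. Sketch, assuming
-- 'TensorFlow.Ops.vector' for the axis constant (binding hypothetical):
--
-- > flipped = reverseV2 input (Ops.vector [1 :: Data.Int.Int32])  -- flip axis 1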
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ rightShift' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "RightShift" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_UINT16 type: DT_UINT32 type: DT_UINT64 } } } -} -- | rint :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ rint = rint' id rint' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ rint' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Rint" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | rngSkip :: forall v'1 v'2 v'3 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 Data.Int.Int64 -- ^ __algorithm__ -> Tensor v'3 Data.Int.Int64 -- ^ __delta__ -> m' (ControlNode) rngSkip = rngSkip' id rngSkip' :: forall v'1 v'2 v'3 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 Data.Int.Int64 -- ^ __algorithm__ -> Tensor v'3 Data.Int.Int64 -- ^ __delta__ -> m' (ControlNode) rngSkip' op'options resource algorithm delta | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs algorithm, buildInputs delta] buildOp [] (opDef "RngSkip" & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } input_arg { name: "algorithm" type: DT_INT64 } input_arg { name: "delta" type: DT_INT64 } -} -- | roll :: forall v'1 v'2 v'3 t tshift taxis . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tshift, OneOf '[Data.Int.Int32, Data.Int.Int64] taxis) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 tshift -- ^ __shift__ -> Tensor v'3 taxis -- ^ __axis__ -> Tensor Build t -- ^ __output__ roll = roll' id roll' :: forall v'1 v'2 v'3 t tshift taxis . 
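{- Added commentary: 'rightShift'' shifts each element of __x__ right by the
   corresponding element of __y__ bits; following the TensorFlow op semantics
   this is an arithmetic shift for the signed Int instances, a logical shift
   for the unsigned Word instances, and implementation-defined when __y__ is
   negative or at least the bit width of the type.  'rint'' rounds to the
   nearest integer, breaking ties to the even value.  Sketch, assumed imports
   as above:

     shifted = rightShift' id (TF.vector [16, -16 :: Data.Int.Int32])
                              (TF.vector [2, 2 :: Data.Int.Int32])
     -- expected values: 4, -4

     rounded = rint' id (TF.vector [0.5, 1.5, 2.5 :: Float])
     -- expected values: 0, 2, 2
-}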
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tshift, OneOf '[Data.Int.Int32, Data.Int.Int64] taxis) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tshift -- ^ __shift__ -> Tensor v'3 taxis -- ^ __axis__ -> Tensor Build t -- ^ __output__ roll' op'options input shift axis | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs shift, buildInputs axis] return (opDef "Roll" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tshift" .~ tensorType (undefined :: tshift) & opAttr "Taxis" .~ tensorType (undefined :: taxis) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "shift" type_attr: "Tshift" } input_arg { name: "axis" type_attr: "Taxis" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tshift" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "Taxis" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | round :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ round = round' id round' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ round' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Round" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | rpc :: forall v'1 v'2 v'3 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __address__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __method__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __request__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __response__ rpc = rpc' id rpc' :: forall v'1 v'2 v'3 m' . (MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __address__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __method__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __request__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __response__ rpc' op'options address method request | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs address, buildInputs method, buildInputs request] buildOp [] (opDef "Rpc" & op'options & opInputs .~ op'inputs) {- input_arg { name: "address" type: DT_STRING } input_arg { name: "method" type: DT_STRING } input_arg { name: "request" type: DT_STRING } output_arg { name: "response" type: DT_STRING } attr { name: "protocol" type: "string" default_value { s: "" } } attr { name: "fail_fast" type: "bool" default_value { b: true } } attr { name: "timeout_in_ms" type: "int" default_value { i: 0 } } -} -- | rsqrt :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ rsqrt = rsqrt' id rsqrt' :: forall v'1 t . 
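{- Usage sketch (added commentary): 'roll'' cyclically shifts __input__ by
   each __shift__ amount along the matching __axis__ entry; elements pushed
   past the end wrap around to the front.

     rolled = roll' id (TF.vector [1, 2, 3, 4, 5 :: Float])
                       (TF.vector [2 :: Data.Int.Int32])  -- shift by two
                       (TF.vector [0 :: Data.Int.Int32])  -- along axis 0
     -- expected values: 4, 5, 1, 2, 3
-}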
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ rsqrt' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Rsqrt" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | rsqrtGrad :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __y__ -> Tensor v'2 t -- ^ __dy__ -> Tensor Build t -- ^ __z__ rsqrtGrad = rsqrtGrad' id rsqrtGrad' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __y__ -> Tensor v'2 t -- ^ __dy__ -> Tensor Build t -- ^ __z__ rsqrtGrad' op'options y dy | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs y, buildInputs dy] return (opDef "RsqrtGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "y" type_attr: "T" } input_arg { name: "dy" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | sampleDistortedBoundingBox :: forall v'1 v'2 t m' . (MonadBuild m', OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word8] t) => Tensor v'1 t -- ^ __image_size__ -> Tensor v'2 Float -- ^ __bounding_boxes__ -> m' ((Tensor Value t, Tensor Value t, Tensor Value Float)) -- ^ (__begin__, __size__, __bboxes__) -- -- * __begin__ -- -- * __size__ -- -- * __bboxes__ sampleDistortedBoundingBox = sampleDistortedBoundingBox' id sampleDistortedBoundingBox' :: forall v'1 v'2 t m' . 
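{- Added commentary: 'rsqrt'' computes the elementwise reciprocal square
   root, y = 1 / sqrt(x).  'rsqrtGrad'' expresses the backward rule through
   the forward output rather than the input: for y = rsqrt(x) and an incoming
   gradient dy it returns dz = -0.5 * dy * y^3, which is d(rsqrt x)/dx
   rewritten in terms of y.
-}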
(MonadBuild m', OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word8] t) => OpParams -> Tensor v'1 t -- ^ __image_size__ -> Tensor v'2 Float -- ^ __bounding_boxes__ -> m' ((Tensor Value t, Tensor Value t, Tensor Value Float)) -- ^ (__begin__, __size__, __bboxes__) -- -- * __begin__ -- -- * __size__ -- -- * __bboxes__ sampleDistortedBoundingBox' op'options image_size bounding_boxes | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs image_size, buildInputs bounding_boxes] buildOp [] (opDef "SampleDistortedBoundingBox" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "image_size" type_attr: "T" } input_arg { name: "bounding_boxes" type: DT_FLOAT } output_arg { name: "begin" type_attr: "T" } output_arg { name: "size" type_attr: "T" } output_arg { name: "bboxes" type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 } } } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } attr { name: "min_object_covered" type: "float" default_value { f: 0.1 } } attr { name: "aspect_ratio_range" type: "list(float)" default_value { list { f: 0.75 f: 1.33 } } } attr { name: "area_range" type: "list(float)" default_value { list { f: 5.0e-2 f: 1.0 } } } attr { name: "max_attempts" type: "int" default_value { i: 100 } } attr { name: "use_image_if_no_bounding_boxes" type: "bool" default_value { b: false } } -} -- | sampleDistortedBoundingBoxV2 :: forall v'1 v'2 v'3 t m' . (MonadBuild m', OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word8] t) => Tensor v'1 t -- ^ __image_size__ -> Tensor v'2 Float -- ^ __bounding_boxes__ -> Tensor v'3 Float -- ^ __min_object_covered__ -> m' ((Tensor Value t, Tensor Value t, Tensor Value Float)) -- ^ (__begin__, __size__, __bboxes__) -- -- * __begin__ -- -- * __size__ -- -- * __bboxes__ sampleDistortedBoundingBoxV2 = sampleDistortedBoundingBoxV2' id sampleDistortedBoundingBoxV2' :: forall v'1 v'2 v'3 t m' . 
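{- Usage sketch (added commentary): the defaulted attrs in the op definition
   above (seed, seed2, min_object_covered, aspect_ratio_range, area_range,
   max_attempts, use_image_if_no_bounding_boxes) are not separate arguments;
   they are reached through the OpParams argument of the primed variant by
   composing opAttr setters, exactly as the generated bodies themselves do.
   'sampleWithSeed' is a hypothetical helper:

     sampleWithSeed image_size boxes =
       sampleDistortedBoundingBox'
         ( (opAttr "seed" .~ (42 :: Data.Int.Int64))
         . (opAttr "min_object_covered" .~ (0.5 :: Float)) )
         image_size boxes
-}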
(MonadBuild m', OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word8] t) => OpParams -> Tensor v'1 t -- ^ __image_size__ -> Tensor v'2 Float -- ^ __bounding_boxes__ -> Tensor v'3 Float -- ^ __min_object_covered__ -> m' ((Tensor Value t, Tensor Value t, Tensor Value Float)) -- ^ (__begin__, __size__, __bboxes__) -- -- * __begin__ -- -- * __size__ -- -- * __bboxes__ sampleDistortedBoundingBoxV2' op'options image_size bounding_boxes min_object_covered | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs image_size, buildInputs bounding_boxes, buildInputs min_object_covered] buildOp [] (opDef "SampleDistortedBoundingBoxV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "image_size" type_attr: "T" } input_arg { name: "bounding_boxes" type: DT_FLOAT } input_arg { name: "min_object_covered" type: DT_FLOAT } output_arg { name: "begin" type_attr: "T" } output_arg { name: "size" type_attr: "T" } output_arg { name: "bboxes" type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 } } } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } attr { name: "aspect_ratio_range" type: "list(float)" default_value { list { f: 0.75 f: 1.33 } } } attr { name: "area_range" type: "list(float)" default_value { list { f: 5.0e-2 f: 1.0 } } } attr { name: "max_attempts" type: "int" default_value { i: 100 } } attr { name: "use_image_if_no_bounding_boxes" type: "bool" default_value { b: false } } -} -- | samplingDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Float -- ^ __rate__ -> Tensor v'3 Data.Int.Int64 -- ^ __seed__ -> Tensor v'4 Data.Int.Int64 -- ^ __seed2__ -> Tensor Build Variant -- ^ __handle__ samplingDataset = samplingDataset' id samplingDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Float -- ^ __rate__ -> Tensor v'3 Data.Int.Int64 -- ^ __seed__ -> Tensor v'4 Data.Int.Int64 -- ^ __seed2__ -> Tensor Build Variant -- ^ __handle__ samplingDataset' op'options output_types input_dataset rate seed seed2 | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs rate, buildInputs seed, buildInputs seed2] return (opDef "SamplingDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "rate" type: DT_FLOAT } input_arg { name: "seed" type: DT_INT64 } input_arg { name: "seed2" type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | save :: forall v'1 v'2 v'3 t m' . (MonadBuild m', TensorTypes t) => Tensor v'1 Data.ByteString.ByteString -- ^ __filename__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_names__ -> TensorList (v'3) t -- ^ __data__ -> m' (ControlNode) save = save' id save' :: forall v'1 v'2 v'3 t m' . 
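{- Usage sketch (added commentary): the heterogeneous __data__ argument of
   'save'' is a TensorList, i.e. a ListOf over Tensor.  Assuming the Nil and
   (:/) constructors from TensorFlow.Types plus TF.scalar and TF.vector from
   TensorFlow.Ops are in scope, a checkpoint of one Float tensor and one
   Int32 scalar could look like this ('saveTwo' is a hypothetical helper):

     saveTwo :: TF.Session ControlNode
     saveTwo = save' id
       (TF.scalar "/tmp/ckpt")  -- filename
       (TF.vector ["w" :: Data.ByteString.ByteString, "step"])
       (TF.vector [1, 2 :: Float] :/ TF.scalar (0 :: Data.Int.Int32) :/ Nil)
-}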
(MonadBuild m', TensorTypes t) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __filename__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_names__ -> TensorList (v'3) t -- ^ __data__ -> m' (ControlNode) save' op'options filename tensor_names data' | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs filename, buildInputs tensor_names, buildInputs data'] buildOp [] (opDef "Save" & opAttr "T" .~ fromTensorTypes (Proxy :: Proxy t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "filename" type: DT_STRING } input_arg { name: "tensor_names" type: DT_STRING } input_arg { name: "data" type_list_attr: "T" } attr { name: "T" type: "list(type)" has_minimum: true minimum: 1 } -} -- | saveSlices :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', TensorTypes t) => Tensor v'1 Data.ByteString.ByteString -- ^ __filename__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_names__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __shapes_and_slices__ -> TensorList (v'4) t -- ^ __data__ -> m' (ControlNode) saveSlices = saveSlices' id saveSlices' :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', TensorTypes t) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __filename__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_names__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __shapes_and_slices__ -> TensorList (v'4) t -- ^ __data__ -> m' (ControlNode) saveSlices' op'options filename tensor_names shapes_and_slices data' | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs filename, buildInputs tensor_names, buildInputs shapes_and_slices, buildInputs data'] buildOp [] (opDef "SaveSlices" & opAttr "T" .~ fromTensorTypes (Proxy :: Proxy t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "filename" type: DT_STRING } input_arg { name: "tensor_names" type: DT_STRING } input_arg { name: "shapes_and_slices" type: DT_STRING } input_arg { name: "data" type_list_attr: "T" } attr { name: "T" type: "list(type)" has_minimum: true minimum: 1 } -} -- | saveV2 :: forall v'1 v'2 v'3 v'4 dtypes m' . (MonadBuild m', TensorTypes dtypes) => Tensor v'1 Data.ByteString.ByteString -- ^ __prefix__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_names__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __shape_and_slices__ -> TensorList (v'4) dtypes -- ^ __tensors__ -> m' (ControlNode) saveV2 = saveV2' id saveV2' :: forall v'1 v'2 v'3 v'4 dtypes m' . (MonadBuild m', TensorTypes dtypes) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __prefix__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __tensor_names__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __shape_and_slices__ -> TensorList (v'4) dtypes -- ^ __tensors__ -> m' (ControlNode) saveV2' op'options prefix tensor_names shape_and_slices tensors | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs prefix, buildInputs tensor_names, buildInputs shape_and_slices, buildInputs tensors] buildOp [] (opDef "SaveV2" & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes) & op'options & opInputs .~ op'inputs) {- input_arg { name: "prefix" type: DT_STRING } input_arg { name: "tensor_names" type: DT_STRING } input_arg { name: "shape_and_slices" type: DT_STRING } input_arg { name: "tensors" type_list_attr: "dtypes" } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } -} -- | scalarSummary :: forall v'1 v'2 t . 
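{- Added commentary: 'saveV2'' writes the newer checkpoint format.  Unlike
   'save'' it takes a __prefix__ (a checkpoint path prefix rather than a
   single filename) and one __shape_and_slices__ string per tensor, where the
   empty string means the whole tensor.  Sketch under the same assumed
   imports as the 'save'' example:

     saveV2Two = saveV2' id
       (TF.scalar "/tmp/ckpt")
       (TF.vector ["w", "step" :: Data.ByteString.ByteString])
       (TF.vector ["", "" :: Data.ByteString.ByteString])
       (TF.vector [1, 2 :: Float] :/ TF.scalar (0 :: Data.Int.Int32) :/ Nil)
-}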
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 Data.ByteString.ByteString -- ^ __tags__ -> Tensor v'2 t -- ^ __values__ -> Tensor Build Data.ByteString.ByteString -- ^ __summary__ scalarSummary = scalarSummary' id scalarSummary' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __tags__ -> Tensor v'2 t -- ^ __values__ -> Tensor Build Data.ByteString.ByteString -- ^ __summary__ scalarSummary' op'options tags values | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tags, buildInputs values] return (opDef "ScalarSummary" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tags" type: DT_STRING } input_arg { name: "values" type_attr: "T" } output_arg { name: "summary" type: DT_STRING } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | scaleAndTranslate :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __images__ -> Tensor v'2 Data.Int.Int32 -- ^ __size__ -> Tensor v'3 Float -- ^ __scale__ -> Tensor v'4 Float -- ^ __translation__ -> Tensor Build Float -- ^ __resized_images__ scaleAndTranslate = scaleAndTranslate' id scaleAndTranslate' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __images__ -> Tensor v'2 Data.Int.Int32 -- ^ __size__ -> Tensor v'3 Float -- ^ __scale__ -> Tensor v'4 Float -- ^ __translation__ -> Tensor Build Float -- ^ __resized_images__ scaleAndTranslate' op'options images size scale translation | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs images, buildInputs size, buildInputs scale, buildInputs translation] return (opDef "ScaleAndTranslate" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "images" type_attr: "T" } input_arg { name: "size" type: DT_INT32 } input_arg { name: "scale" type: DT_FLOAT } input_arg { name: "translation" type: DT_FLOAT } output_arg { name: "resized_images" type: DT_FLOAT } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_UINT8 type: DT_INT16 type: DT_UINT16 type: DT_INT32 type: DT_INT64 type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "kernel_type" type: "string" default_value { s: "lanczos3" } } attr { name: "antialias" type: "bool" default_value { b: true } } -} -- | scaleAndTranslateGrad :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Float] t) => Tensor v'1 t -- ^ __grads__ -> Tensor v'2 t -- ^ __original_image__ -> Tensor v'3 Float -- ^ __scale__ -> Tensor v'4 Float -- ^ __translation__ -> Tensor Build t -- ^ __output__ scaleAndTranslateGrad = scaleAndTranslateGrad' id scaleAndTranslateGrad' :: forall v'1 v'2 v'3 v'4 t . 
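{- Added commentary: 'scalarSummary'' pairs each string tag with a numeric
   value and emits a serialized Summary protobuf for a summary writer.
   'scaleAndTranslate'' resizes __images__ with an explicit per-axis
   __scale__ and __translation__; its kernel_type attr (default "lanczos3")
   and antialias attr are set through OpParams, e.g. a hypothetical
   box-filter helper:

     boxResize images size scale translation =
       scaleAndTranslate'
         (opAttr "kernel_type" .~ ("box" :: Data.ByteString.ByteString))
         images size scale translation
-}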
(OneOf '[Float] t) => OpParams -> Tensor v'1 t -- ^ __grads__ -> Tensor v'2 t -- ^ __original_image__ -> Tensor v'3 Float -- ^ __scale__ -> Tensor v'4 Float -- ^ __translation__ -> Tensor Build t -- ^ __output__ scaleAndTranslateGrad' op'options grads original_image scale translation | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs grads, buildInputs original_image, buildInputs scale, buildInputs translation] return (opDef "ScaleAndTranslateGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "grads" type_attr: "T" } input_arg { name: "original_image" type_attr: "T" } input_arg { name: "scale" type: DT_FLOAT } input_arg { name: "translation" type: DT_FLOAT } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT } } } attr { name: "kernel_type" type: "string" default_value { s: "lanczos3" } } attr { name: "antialias" type: "bool" default_value { b: true } } -} -- | scatterAdd :: forall v'2 v'3 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (Tensor Ref t) -- ^ __output_ref__ scatterAdd = scatterAdd' id scatterAdd' :: forall v'2 v'3 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (Tensor Ref t) -- ^ __output_ref__ scatterAdd' op'options ref indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs indices, buildInputs updates] buildOp [] (opDef "ScatterAdd" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" type_attr: "T" is_ref: true } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "T" } output_arg { name: "output_ref" type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | scatterDiv :: forall v'2 v'3 t tindices m' . 
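{- Added commentary: the scatter* ops below share one shape: they mutate a
   Ref tensor in place at the given indices and return the same Ref for
   chaining.  'scatterAdd'' computes ref[indices, ...] += updates, and the
   Sub/Mul/Div/Min/Max variants apply their operators likewise, while
   'scatterUpdate'' plainly assigns.  The use_locking attr (default False for
   the arithmetic variants, True for the Update variants) goes through
   OpParams; 'lockedAdd' is a hypothetical helper:

     lockedAdd ref indices updates =
       scatterAdd' (opAttr "use_locking" .~ True) ref indices updates
-}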
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (Tensor Ref t) -- ^ __output_ref__ scatterDiv = scatterDiv' id scatterDiv' :: forall v'2 v'3 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (Tensor Ref t) -- ^ __output_ref__ scatterDiv' op'options ref indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs indices, buildInputs updates] buildOp [] (opDef "ScatterDiv" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" type_attr: "T" is_ref: true } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "T" } output_arg { name: "output_ref" type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | scatterMax :: forall v'2 v'3 t tindices m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (Tensor Ref t) -- ^ __output_ref__ scatterMax = scatterMax' id scatterMax' :: forall v'2 v'3 t tindices m' . 
(MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (Tensor Ref t) -- ^ __output_ref__ scatterMax' op'options ref indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs indices, buildInputs updates] buildOp [] (opDef "ScatterMax" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" type_attr: "T" is_ref: true } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "T" } output_arg { name: "output_ref" type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | scatterMin :: forall v'2 v'3 t tindices m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (Tensor Ref t) -- ^ __output_ref__ scatterMin = scatterMin' id scatterMin' :: forall v'2 v'3 t tindices m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (Tensor Ref t) -- ^ __output_ref__ scatterMin' op'options ref indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs indices, buildInputs updates] buildOp [] (opDef "ScatterMin" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" type_attr: "T" is_ref: true } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "T" } output_arg { name: "output_ref" type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | scatterMul :: forall v'2 v'3 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (Tensor Ref t) -- ^ __output_ref__ scatterMul = scatterMul' id scatterMul' :: forall v'2 v'3 t tindices m' . 
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (Tensor Ref t) -- ^ __output_ref__ scatterMul' op'options ref indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs indices, buildInputs updates] buildOp [] (opDef "ScatterMul" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" type_attr: "T" is_ref: true } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "T" } output_arg { name: "output_ref" type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | scatterNd :: forall v'1 v'2 v'3 t tindices . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 tindices -- ^ __indices__ -> Tensor v'2 t -- ^ __updates__ -> Tensor v'3 tindices -- ^ __shape__ -> Tensor Build t -- ^ __output__ scatterNd = scatterNd' id scatterNd' :: forall v'1 v'2 v'3 t tindices . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 tindices -- ^ __indices__ -> Tensor v'2 t -- ^ __updates__ -> Tensor v'3 tindices -- ^ __shape__ -> Tensor Build t -- ^ __output__ scatterNd' op'options indices updates shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices, buildInputs updates, buildInputs shape] return (opDef "ScatterNd" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "T" } input_arg { name: "shape" type_attr: "Tindices" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | scatterNdAdd :: forall v'2 v'3 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (Tensor Ref t) -- ^ __output_ref__ scatterNdAdd = scatterNdAdd' id scatterNdAdd' :: forall v'2 v'3 t tindices m' . 
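{- Usage sketch (added commentary): unlike the Ref-based ops around it,
   'scatterNd'' is pure; it materialises a zero tensor of the given __shape__
   and scatters __updates__ into it at __indices__, summing duplicates.

     scattered = scatterNd' id
       (TF.constant (Shape [2, 1]) [1, 3 :: Data.Int.Int32])  -- indices
       (TF.vector [10, 20 :: Float])                          -- updates
       (TF.vector [5 :: Data.Int.Int32])                      -- shape
     -- expected values: 0, 10, 0, 20, 0
-}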
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (Tensor Ref t) -- ^ __output_ref__ scatterNdAdd' op'options ref indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs indices, buildInputs updates] buildOp [] (opDef "ScatterNdAdd" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" type_attr: "T" is_ref: true } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "T" } output_arg { name: "output_ref" type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | scatterNdNonAliasingAdd :: forall v'1 v'2 v'3 t tindices . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> Tensor Build t -- ^ __output__ scatterNdNonAliasingAdd = scatterNdNonAliasingAdd' id scatterNdNonAliasingAdd' :: forall v'1 v'2 v'3 t tindices . 
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> Tensor Build t -- ^ __output__ scatterNdNonAliasingAdd' op'options input indices updates | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs indices, buildInputs updates] return (opDef "ScatterNdNonAliasingAdd" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 type: DT_BOOL } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | scatterNdSub :: forall v'2 v'3 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (Tensor Ref t) -- ^ __output_ref__ scatterNdSub = scatterNdSub' id scatterNdSub' :: forall v'2 v'3 t tindices m' . 
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (Tensor Ref t) -- ^ __output_ref__ scatterNdSub' op'options ref indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs indices, buildInputs updates] buildOp [] (opDef "ScatterNdSub" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" type_attr: "T" is_ref: true } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "T" } output_arg { name: "output_ref" type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | scatterNdUpdate :: forall v'2 v'3 t tindices m' . (MonadBuild m', TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (Tensor Ref t) -- ^ __output_ref__ scatterNdUpdate = scatterNdUpdate' id scatterNdUpdate' :: forall v'2 v'3 t tindices m' . (MonadBuild m', TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (Tensor Ref t) -- ^ __output_ref__ scatterNdUpdate' op'options ref indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs indices, buildInputs updates] buildOp [] (opDef "ScatterNdUpdate" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" type_attr: "T" is_ref: true } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "T" } output_arg { name: "output_ref" type_attr: "T" is_ref: true } attr { name: "T" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: true } } -} -- | scatterSub :: forall v'2 v'3 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (Tensor Ref t) -- ^ __output_ref__ scatterSub = scatterSub' id scatterSub' :: forall v'2 v'3 t tindices m' . 
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (Tensor Ref t) -- ^ __output_ref__ scatterSub' op'options ref indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs indices, buildInputs updates] buildOp [] (opDef "ScatterSub" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" type_attr: "T" is_ref: true } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "T" } output_arg { name: "output_ref" type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | scatterUpdate :: forall v'2 v'3 t tindices m' . (MonadBuild m', TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (Tensor Ref t) -- ^ __output_ref__ scatterUpdate = scatterUpdate' id scatterUpdate' :: forall v'2 v'3 t tindices m' . 
(MonadBuild m', TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __ref__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> m' (Tensor Ref t) -- ^ __output_ref__ scatterUpdate' op'options ref indices updates | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs indices, buildInputs updates] buildOp [] (opDef "ScatterUpdate" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" type_attr: "T" is_ref: true } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "T" } output_arg { name: "output_ref" type_attr: "T" is_ref: true } attr { name: "T" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: true } } -} -- | sdcaFprint :: Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor Build Data.Int.Int64 -- ^ __output__ sdcaFprint = sdcaFprint' id sdcaFprint' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor Build Data.Int.Int64 -- ^ __output__ sdcaFprint' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "SdcaFprint" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_STRING } output_arg { name: "output" type: DT_INT64 } -} -- | sdcaOptimizer :: Float -- ^ __l1__ -> Float -- ^ __l2__ -> Data.Int.Int64 -- ^ __num_inner_iterations__ -> Data.Int.Int64 -- ^ __num_loss_partitions__ -> [Tensor v'1 Data.Int.Int64] -- ^ __sparse_example_indices__ -> [Tensor v'2 Data.Int.Int64] -- ^ __sparse_feature_indices__ -> [Tensor v'3 Float] -- ^ __sparse_feature_values__ -> [Tensor v'4 Float] -- ^ __dense_features__ -> Tensor v'5 Float -- ^ __example_weights__ -> Tensor v'6 Float -- ^ __example_labels__ -> [Tensor v'7 Data.Int.Int64] -- ^ __sparse_indices__ -> [Tensor v'8 Float] -- ^ __sparse_weights__ -> [Tensor v'9 Float] -- ^ __dense_weights__ -> Tensor v'10 Float -- ^ __example_state_data__ -> (Tensor Build Float, [Tensor Build Float], [Tensor Build Float]) -- ^ (__out_example_state_data__, __out_delta_sparse_weights__, __out_delta_dense_weights__) -- -- * __out_example_state_data__ -- -- * __out_delta_sparse_weights__ -- -- * __out_delta_dense_weights__ sdcaOptimizer = sdcaOptimizer' id sdcaOptimizer' :: OpParams -> Float -- ^ __l1__ -> Float -- ^ __l2__ -> Data.Int.Int64 -- ^ __num_inner_iterations__ -> Data.Int.Int64 -- ^ __num_loss_partitions__ -> [Tensor v'1 Data.Int.Int64] -- ^ __sparse_example_indices__ -> [Tensor v'2 Data.Int.Int64] -- ^ __sparse_feature_indices__ -> [Tensor v'3 Float] -- ^ __sparse_feature_values__ -> [Tensor v'4 Float] -- ^ __dense_features__ -> Tensor v'5 Float -- ^ __example_weights__ -> Tensor v'6 Float -- ^ __example_labels__ -> [Tensor v'7 Data.Int.Int64] -- ^ __sparse_indices__ -> [Tensor v'8 Float] -- ^ __sparse_weights__ -> [Tensor v'9 Float] -- ^ __dense_weights__ -> Tensor v'10 Float -- ^ __example_state_data__ -> (Tensor Build Float, [Tensor Build Float], [Tensor Build Float]) -- ^ (__out_example_state_data__, __out_delta_sparse_weights__, __out_delta_dense_weights__) -- -- * __out_example_state_data__ -- -- * __out_delta_sparse_weights__ -- -- * __out_delta_dense_weights__ sdcaOptimizer' op'options l1 l2 num_inner_iterations 
num_loss_partitions sparse_example_indices sparse_feature_indices sparse_feature_values dense_features example_weights example_labels sparse_indices sparse_weights dense_weights example_state_data | eqLengthGuard [("num_sparse_features", [("sparse_example_indices", length sparse_example_indices), ("sparse_feature_indices", length sparse_feature_indices), ("sparse_indices", length sparse_indices), ("sparse_weights", length sparse_weights)]), ("num_sparse_features_with_values", [("sparse_feature_values", length sparse_feature_values)]), ("num_dense_features", [("dense_features", length dense_features), ("dense_weights", length dense_weights)])] = pureOp [num_sparse_features, num_dense_features] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sparse_example_indices, buildInputs sparse_feature_indices, buildInputs sparse_feature_values, buildInputs dense_features, buildInputs example_weights, buildInputs example_labels, buildInputs sparse_indices, buildInputs sparse_weights, buildInputs dense_weights, buildInputs example_state_data] return (opDef "SdcaOptimizer" & opAttr "l1" .~ l1 & opAttr "l2" .~ l2 & opAttr "num_inner_iterations" .~ num_inner_iterations & opAttr "num_loss_partitions" .~ num_loss_partitions & opAttr "num_sparse_features" .~ num_sparse_features & opAttr "num_sparse_features_with_values" .~ num_sparse_features_with_values & opAttr "num_dense_features" .~ num_dense_features & op'options & opInputs .~ op'inputs) where num_sparse_features = fromIntegral (length sparse_example_indices) :: Int64 num_sparse_features_with_values = fromIntegral (length sparse_feature_values) :: Int64 num_dense_features = fromIntegral (length dense_features) :: Int64 {- input_arg { name: "sparse_example_indices" type: DT_INT64 number_attr: "num_sparse_features" } input_arg { name: "sparse_feature_indices" type: DT_INT64 number_attr: "num_sparse_features" } input_arg { name: "sparse_feature_values" type: DT_FLOAT number_attr: "num_sparse_features_with_values" } input_arg { name: "dense_features" type: DT_FLOAT number_attr: "num_dense_features" } input_arg { name: "example_weights" type: DT_FLOAT } input_arg { name: "example_labels" type: DT_FLOAT } input_arg { name: "sparse_indices" type: DT_INT64 number_attr: "num_sparse_features" } input_arg { name: "sparse_weights" type: DT_FLOAT number_attr: "num_sparse_features" } input_arg { name: "dense_weights" type: DT_FLOAT number_attr: "num_dense_features" } input_arg { name: "example_state_data" type: DT_FLOAT } output_arg { name: "out_example_state_data" type: DT_FLOAT } output_arg { name: "out_delta_sparse_weights" type: DT_FLOAT number_attr: "num_sparse_features" } output_arg { name: "out_delta_dense_weights" type: DT_FLOAT number_attr: "num_dense_features" } attr { name: "loss_type" type: "string" allowed_values { list { s: "logistic_loss" s: "squared_loss" s: "hinge_loss" s: "smooth_hinge_loss" s: "poisson_loss" } } } attr { name: "adaptative" type: "bool" default_value { b: false } } attr { name: "num_sparse_features" type: "int" has_minimum: true } attr { name: "num_sparse_features_with_values" type: "int" has_minimum: true } attr { name: "num_dense_features" type: "int" has_minimum: true } attr { name: "l1" type: "float" } attr { name: "l2" type: "float" } attr { name: "num_loss_partitions" type: "int" has_minimum: true minimum: 1 } attr { name: "num_inner_iterations" type: "int" has_minimum: true minimum: 1 } -} -- | sdcaOptimizerV2 :: Float -- ^ __l1__ -> Float -- ^ __l2__ -> Data.Int.Int64 -- ^ __num_inner_iterations__ -> 
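{- Added commentary: 'sdcaOptimizer'' is the one wrapper in this
   neighbourhood whose eqLengthGuard does real work.  Each named group pairs
   a number_attr with every list input counted by it; the guard passes only
   when all lists within a group have equal length, and that shared length is
   then written into the attr (num_sparse_features,
   num_sparse_features_with_values, num_dense_features) via fromIntegral.
   Passing, say, three sparse_example_indices lists but two sparse_weights
   lists fails the guard instead of building a malformed graph node.  Note
   also that the upstream op really does spell its boolean attr "adaptative";
   the V2 op below corrects the spelling to "adaptive".
-}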
Data.Int.Int64 -- ^ __num_loss_partitions__ -> [Tensor v'1 Data.Int.Int64] -- ^ __sparse_example_indices__ -> [Tensor v'2 Data.Int.Int64] -- ^ __sparse_feature_indices__ -> [Tensor v'3 Float] -- ^ __sparse_feature_values__ -> [Tensor v'4 Float] -- ^ __dense_features__ -> Tensor v'5 Float -- ^ __example_weights__ -> Tensor v'6 Float -- ^ __example_labels__ -> [Tensor v'7 Data.Int.Int64] -- ^ __sparse_indices__ -> [Tensor v'8 Float] -- ^ __sparse_weights__ -> [Tensor v'9 Float] -- ^ __dense_weights__ -> Tensor v'10 Float -- ^ __example_state_data__ -> (Tensor Build Float, [Tensor Build Float], [Tensor Build Float]) -- ^ (__out_example_state_data__, __out_delta_sparse_weights__, __out_delta_dense_weights__) -- -- * __out_example_state_data__ -- -- * __out_delta_sparse_weights__ -- -- * __out_delta_dense_weights__ sdcaOptimizerV2 = sdcaOptimizerV2' id sdcaOptimizerV2' :: OpParams -> Float -- ^ __l1__ -> Float -- ^ __l2__ -> Data.Int.Int64 -- ^ __num_inner_iterations__ -> Data.Int.Int64 -- ^ __num_loss_partitions__ -> [Tensor v'1 Data.Int.Int64] -- ^ __sparse_example_indices__ -> [Tensor v'2 Data.Int.Int64] -- ^ __sparse_feature_indices__ -> [Tensor v'3 Float] -- ^ __sparse_feature_values__ -> [Tensor v'4 Float] -- ^ __dense_features__ -> Tensor v'5 Float -- ^ __example_weights__ -> Tensor v'6 Float -- ^ __example_labels__ -> [Tensor v'7 Data.Int.Int64] -- ^ __sparse_indices__ -> [Tensor v'8 Float] -- ^ __sparse_weights__ -> [Tensor v'9 Float] -- ^ __dense_weights__ -> Tensor v'10 Float -- ^ __example_state_data__ -> (Tensor Build Float, [Tensor Build Float], [Tensor Build Float]) -- ^ (__out_example_state_data__, __out_delta_sparse_weights__, __out_delta_dense_weights__) -- -- * __out_example_state_data__ -- -- * __out_delta_sparse_weights__ -- -- * __out_delta_dense_weights__ sdcaOptimizerV2' op'options l1 l2 num_inner_iterations num_loss_partitions sparse_example_indices sparse_feature_indices sparse_feature_values dense_features example_weights example_labels sparse_indices sparse_weights dense_weights example_state_data | eqLengthGuard [("num_sparse_features", [("sparse_example_indices", length sparse_example_indices), ("sparse_feature_indices", length sparse_feature_indices), ("sparse_indices", length sparse_indices), ("sparse_weights", length sparse_weights)]), ("num_sparse_features_with_values", [("sparse_feature_values", length sparse_feature_values)]), ("num_dense_features", [("dense_features", length dense_features), ("dense_weights", length dense_weights)])] = pureOp [num_sparse_features, num_dense_features] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sparse_example_indices, buildInputs sparse_feature_indices, buildInputs sparse_feature_values, buildInputs dense_features, buildInputs example_weights, buildInputs example_labels, buildInputs sparse_indices, buildInputs sparse_weights, buildInputs dense_weights, buildInputs example_state_data] return (opDef "SdcaOptimizerV2" & opAttr "l1" .~ l1 & opAttr "l2" .~ l2 & opAttr "num_inner_iterations" .~ num_inner_iterations & opAttr "num_loss_partitions" .~ num_loss_partitions & opAttr "num_sparse_features" .~ num_sparse_features & opAttr "num_sparse_features_with_values" .~ num_sparse_features_with_values & opAttr "num_dense_features" .~ num_dense_features & op'options & opInputs .~ op'inputs) where num_sparse_features = fromIntegral (length sparse_example_indices) :: Int64 num_sparse_features_with_values = fromIntegral (length sparse_feature_values) :: Int64 num_dense_features = fromIntegral (length 
dense_features) :: Int64 {- input_arg { name: "sparse_example_indices" type: DT_INT64 number_attr: "num_sparse_features" } input_arg { name: "sparse_feature_indices" type: DT_INT64 number_attr: "num_sparse_features" } input_arg { name: "sparse_feature_values" type: DT_FLOAT number_attr: "num_sparse_features_with_values" } input_arg { name: "dense_features" type: DT_FLOAT number_attr: "num_dense_features" } input_arg { name: "example_weights" type: DT_FLOAT } input_arg { name: "example_labels" type: DT_FLOAT } input_arg { name: "sparse_indices" type: DT_INT64 number_attr: "num_sparse_features" } input_arg { name: "sparse_weights" type: DT_FLOAT number_attr: "num_sparse_features" } input_arg { name: "dense_weights" type: DT_FLOAT number_attr: "num_dense_features" } input_arg { name: "example_state_data" type: DT_FLOAT } output_arg { name: "out_example_state_data" type: DT_FLOAT } output_arg { name: "out_delta_sparse_weights" type: DT_FLOAT number_attr: "num_sparse_features" } output_arg { name: "out_delta_dense_weights" type: DT_FLOAT number_attr: "num_dense_features" } attr { name: "loss_type" type: "string" allowed_values { list { s: "logistic_loss" s: "squared_loss" s: "hinge_loss" s: "smooth_hinge_loss" s: "poisson_loss" } } } attr { name: "adaptive" type: "bool" default_value { b: false } } attr { name: "num_sparse_features" type: "int" has_minimum: true } attr { name: "num_sparse_features_with_values" type: "int" has_minimum: true } attr { name: "num_dense_features" type: "int" has_minimum: true } attr { name: "l1" type: "float" } attr { name: "l2" type: "float" } attr { name: "num_loss_partitions" type: "int" has_minimum: true minimum: 1 } attr { name: "num_inner_iterations" type: "int" has_minimum: true minimum: 1 } -} -- | sdcaShrinkL1 :: forall m' . (MonadBuild m') => Float -- ^ __l1__ -> Float -- ^ __l2__ -> [Tensor Ref Float] -- ^ __weights__ -> m' (ControlNode) sdcaShrinkL1 = sdcaShrinkL1' id sdcaShrinkL1' :: forall m' . (MonadBuild m') => OpParams -> Float -- ^ __l1__ -> Float -- ^ __l2__ -> [Tensor Ref Float] -- ^ __weights__ -> m' (ControlNode) sdcaShrinkL1' op'options l1 l2 weights | eqLengthGuard [("num_features", [("weights", length weights)])] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs weights] buildOp [] (opDef "SdcaShrinkL1" & opAttr "l1" .~ l1 & opAttr "l2" .~ l2 & opAttr "num_features" .~ num_features & op'options & opInputs .~ op'inputs) where num_features = fromIntegral (length weights) :: Int64 {- input_arg { name: "weights" type: DT_FLOAT number_attr: "num_features" is_ref: true } attr { name: "num_features" type: "int" has_minimum: true } attr { name: "l1" type: "float" } attr { name: "l2" type: "float" } -} -- | segmentMax :: forall v'1 v'2 t tindices . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__ -> Tensor Build t -- ^ __output__ segmentMax = segmentMax' id segmentMax' :: forall v'1 v'2 t tindices . 
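{- Added commentary: the segment* ops that follow reduce __data__ along its
   first dimension in groups named by __segment_ids__, which must be sorted
   ascending; output row i combines exactly the rows j with
   segment_ids[j] == i (sum, mean, min, max, or product, per op).
   'perSegmentSums' is a hypothetical helper:

     perSegmentSums = segmentSum' id
       (TF.vector [1, 2, 3, 4 :: Float])
       (TF.vector [0, 0, 1, 1 :: Data.Int.Int32])
     -- expected values: 3, 7
-}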
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__ -> Tensor Build t -- ^ __output__ segmentMax' op'options data' segment_ids | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs segment_ids] return (opDef "SegmentMax" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "segment_ids" type_attr: "Tindices" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | segmentMean :: forall v'1 v'2 t tindices . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__ -> Tensor Build t -- ^ __output__ segmentMean = segmentMean' id segmentMean' :: forall v'1 v'2 t tindices . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__ -> Tensor Build t -- ^ __output__ segmentMean' op'options data' segment_ids | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs segment_ids] return (opDef "SegmentMean" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "segment_ids" type_attr: "Tindices" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | segmentMin :: forall v'1 v'2 t tindices . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__ -> Tensor Build t -- ^ __output__ segmentMin = segmentMin' id segmentMin' :: forall v'1 v'2 t tindices . 
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__ -> Tensor Build t -- ^ __output__ segmentMin' op'options data' segment_ids | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs segment_ids] return (opDef "SegmentMin" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "segment_ids" type_attr: "Tindices" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | segmentProd :: forall v'1 v'2 t tindices . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__ -> Tensor Build t -- ^ __output__ segmentProd = segmentProd' id segmentProd' :: forall v'1 v'2 t tindices . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__ -> Tensor Build t -- ^ __output__ segmentProd' op'options data' segment_ids | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs segment_ids] return (opDef "SegmentProd" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "segment_ids" type_attr: "Tindices" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | segmentSum :: forall v'1 v'2 t tindices . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__ -> Tensor Build t -- ^ __output__ segmentSum = segmentSum' id segmentSum' :: forall v'1 v'2 t tindices . 
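-- 'segmentSum' is the additive member of the family: output row i is the
-- sum of all rows j with segment_ids[j] == i. Sketch under the same
-- assumption as above ('constant' from TensorFlow.Ops):
--
-- > -- data = [1,2,3,4], segment_ids = [0,0,1,1]  ==>  result = [3, 7]
-- > sums :: Tensor Build Float
-- > sums = segmentSum (constant (Shape [4]) [1, 2, 3, 4])
-- >                   (constant (Shape [4]) [0, 0, 1, 1 :: Data.Int.Int32])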
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__ -> Tensor Build t -- ^ __output__ segmentSum' op'options data' segment_ids | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs segment_ids] return (opDef "SegmentSum" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "segment_ids" type_attr: "Tindices" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | select :: forall v'1 v'2 v'3 t . (TensorType t) => Tensor v'1 Bool -- ^ __condition__ -> Tensor v'2 t -- ^ __t__ -> Tensor v'3 t -- ^ __e__ -> Tensor Build t -- ^ __output__ select = select' id select' :: forall v'1 v'2 v'3 t . (TensorType t) => OpParams -> Tensor v'1 Bool -- ^ __condition__ -> Tensor v'2 t -- ^ __t__ -> Tensor v'3 t -- ^ __e__ -> Tensor Build t -- ^ __output__ select' op'options condition t e | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs condition, buildInputs t, buildInputs e] return (opDef "Select" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "condition" type: DT_BOOL } input_arg { name: "t" type_attr: "T" } input_arg { name: "e" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } -} -- | selectV2 :: forall v'1 v'2 v'3 t . (TensorType t) => Tensor v'1 Bool -- ^ __condition__ -> Tensor v'2 t -- ^ __t__ -> Tensor v'3 t -- ^ __e__ -> Tensor Build t -- ^ __output__ selectV2 = selectV2' id selectV2' :: forall v'1 v'2 v'3 t . (TensorType t) => OpParams -> Tensor v'1 Bool -- ^ __condition__ -> Tensor v'2 t -- ^ __t__ -> Tensor v'3 t -- ^ __e__ -> Tensor Build t -- ^ __output__ selectV2' op'options condition t e | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs condition, buildInputs t, buildInputs e] return (opDef "SelectV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "condition" type: DT_BOOL } input_arg { name: "t" type_attr: "T" } input_arg { name: "e" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } -} -- | selfAdjointEig :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ selfAdjointEig = selfAdjointEig' id selfAdjointEig' :: forall v'1 t . 
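-- 'select' above is an elementwise conditional: output[i] is t[i] where
-- condition[i] holds and e[i] otherwise; 'selectV2' additionally broadcasts
-- its three arguments against each other. Illustrative sketch, again
-- assuming 'constant' from TensorFlow.Ops:
--
-- > -- condition = [True,False,True]  ==>  result = [1, 20, 3]
-- > picked :: Tensor Build Float
-- > picked = select (constant (Shape [3]) [True, False, True])
-- >                 (constant (Shape [3]) [1, 2, 3])
-- >                 (constant (Shape [3]) [10, 20, 30])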
(OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ selfAdjointEig' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "SelfAdjointEig" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_HALF } } } -} -- | selfAdjointEigV2 :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> (Tensor Build t, Tensor Build t) -- ^ (__e__, __v__) -- -- * __e__ -- -- * __v__ selfAdjointEigV2 = selfAdjointEigV2' id selfAdjointEigV2' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> (Tensor Build t, Tensor Build t) -- ^ (__e__, __v__) -- -- * __e__ -- -- * __v__ selfAdjointEigV2' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "SelfAdjointEigV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "e" type_attr: "T" } output_arg { name: "v" type_attr: "T" } attr { name: "compute_v" type: "bool" default_value { b: true } } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_HALF type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | selu :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __features__ -> Tensor Build t -- ^ __activations__ selu = selu' id selu' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __features__ -> Tensor Build t -- ^ __activations__ selu' op'options features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features] return (opDef "Selu" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "features" type_attr: "T" } output_arg { name: "activations" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | seluGrad :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __gradients__ -> Tensor v'2 t -- ^ __outputs__ -> Tensor Build t -- ^ __backprops__ seluGrad = seluGrad' id seluGrad' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __gradients__ -> Tensor v'2 t -- ^ __outputs__ -> Tensor Build t -- ^ __backprops__ seluGrad' op'options gradients outputs | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients, buildInputs outputs] return (opDef "SeluGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "gradients" type_attr: "T" } input_arg { name: "outputs" type_attr: "T" } output_arg { name: "backprops" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | sendTPUEmbeddingGradients :: forall v'1 v'2 m' . 
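-- For reference, 'selu' above computes the scaled exponential linear unit
-- of Klambauer et al.: selu(x) = scale * x for x > 0 and
-- selu(x) = scale * alpha * (exp(x) - 1) otherwise, with
-- alpha ~ 1.6732632 and scale ~ 1.0507010; 'seluGrad' is its backward
-- pass, taking the incoming gradients and the op's outputs.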
(MonadBuild m') => [Tensor v'1 Float] -- ^ __inputs__ -> [Tensor v'2 Float] -- ^ __learning_rates__ -> m' (ControlNode) sendTPUEmbeddingGradients = sendTPUEmbeddingGradients' id sendTPUEmbeddingGradients' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> [Tensor v'1 Float] -- ^ __inputs__ -> [Tensor v'2 Float] -- ^ __learning_rates__ -> m' (ControlNode) sendTPUEmbeddingGradients' op'options inputs learning_rates | eqLengthGuard [("N", [("inputs", length inputs)]), ("NN", [("learning_rates", length learning_rates)])] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs, buildInputs learning_rates] buildOp [] (opDef "SendTPUEmbeddingGradients" & opAttr "N" .~ n & opAttr "NN" .~ nN & op'options & opInputs .~ op'inputs) where n = fromIntegral (length inputs) :: Int64 nN = fromIntegral (length learning_rates) :: Int64 {- input_arg { name: "inputs" type: DT_FLOAT number_attr: "N" } input_arg { name: "learning_rates" type: DT_FLOAT number_attr: "NN" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "NN" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "config" type: "string" } -} -- | serializeIterator :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __resource_handle__ -> m' (Tensor Value Variant) -- ^ __serialized__ serializeIterator = serializeIterator' id serializeIterator' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource_handle__ -> m' (Tensor Value Variant) -- ^ __serialized__ serializeIterator' op'options resource_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource_handle] buildOp [] (opDef "SerializeIterator" & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource_handle" type: DT_RESOURCE } output_arg { name: "serialized" type: DT_VARIANT } -} -- | serializeManySparse :: forall v'1 v'2 v'3 t out_type . (TensorType t, OneOf '[Data.ByteString.ByteString, Variant] out_type) => Tensor v'1 Data.Int.Int64 -- ^ __sparse_indices__ -> Tensor v'2 t -- ^ __sparse_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __sparse_shape__ -> Tensor Build out_type -- ^ __serialized_sparse__ serializeManySparse = serializeManySparse' id serializeManySparse' :: forall v'1 v'2 v'3 t out_type . (TensorType t, OneOf '[Data.ByteString.ByteString, Variant] out_type) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __sparse_indices__ -> Tensor v'2 t -- ^ __sparse_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __sparse_shape__ -> Tensor Build out_type -- ^ __serialized_sparse__ serializeManySparse' op'options sparse_indices sparse_values sparse_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sparse_indices, buildInputs sparse_values, buildInputs sparse_shape] return (opDef "SerializeManySparse" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "sparse_indices" type: DT_INT64 } input_arg { name: "sparse_values" type_attr: "T" } input_arg { name: "sparse_shape" type: DT_INT64 } output_arg { name: "serialized_sparse" type_attr: "out_type" } attr { name: "T" type: "type" } attr { name: "out_type" type: "type" default_value { type: DT_STRING } allowed_values { list { type: DT_STRING type: DT_VARIANT } } } -} -- | serializeSparse :: forall v'1 v'2 v'3 t out_type . 
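-- Note that 'sendTPUEmbeddingGradients' above does not expose its required
-- __config__ string attr positionally; if used, it presumably has to be
-- supplied through 'OpParams' (e.g. @opAttr "config" .~ ...@). The
-- serialize ops here ('serializeManySparse' above, 'serializeSparse' next)
-- pick their output representation from __out_type__, so callers choose
-- between DT_STRING and DT_VARIANT with an ordinary type annotation.
-- Sketch with hypothetical sparse-tensor inputs:
--
-- > serialized :: Tensor Build Data.ByteString.ByteString
-- > serialized = serializeManySparse sparseIndices sparseValues sparseShape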
(TensorType t, OneOf '[Data.ByteString.ByteString, Variant] out_type) => Tensor v'1 Data.Int.Int64 -- ^ __sparse_indices__ -> Tensor v'2 t -- ^ __sparse_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __sparse_shape__ -> Tensor Build out_type -- ^ __serialized_sparse__ serializeSparse = serializeSparse' id serializeSparse' :: forall v'1 v'2 v'3 t out_type . (TensorType t, OneOf '[Data.ByteString.ByteString, Variant] out_type) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __sparse_indices__ -> Tensor v'2 t -- ^ __sparse_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __sparse_shape__ -> Tensor Build out_type -- ^ __serialized_sparse__ serializeSparse' op'options sparse_indices sparse_values sparse_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sparse_indices, buildInputs sparse_values, buildInputs sparse_shape] return (opDef "SerializeSparse" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "sparse_indices" type: DT_INT64 } input_arg { name: "sparse_values" type_attr: "T" } input_arg { name: "sparse_shape" type: DT_INT64 } output_arg { name: "serialized_sparse" type_attr: "out_type" } attr { name: "T" type: "type" } attr { name: "out_type" type: "type" default_value { type: DT_STRING } allowed_values { list { type: DT_STRING type: DT_VARIANT } } } -} -- | serializeTensor :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __tensor__ -> Tensor Build Data.ByteString.ByteString -- ^ __serialized__ serializeTensor = serializeTensor' id serializeTensor' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __tensor__ -> Tensor Build Data.ByteString.ByteString -- ^ __serialized__ serializeTensor' op'options tensor | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor] return (opDef "SerializeTensor" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" type_attr: "T" } output_arg { name: "serialized" type: DT_STRING } attr { name: "T" type: "type" } -} -- | setSize :: forall v'1 v'2 v'3 t . (OneOf '[Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8] t) => Tensor v'1 Data.Int.Int64 -- ^ __set_indices__ -> Tensor v'2 t -- ^ __set_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __set_shape__ -> Tensor Build Data.Int.Int32 -- ^ __size__ setSize = setSize' id setSize' :: forall v'1 v'2 v'3 t . 
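-- 'serializeTensor' above encodes a tensor into its TensorProto wire form
-- as a scalar string, so no output-type annotation is needed. Sketch,
-- assuming 'constant' from TensorFlow.Ops:
--
-- > proto :: Tensor Build Data.ByteString.ByteString
-- > proto = serializeTensor (constant (Shape [2]) [1, 2 :: Float])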
(OneOf '[Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8] t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __set_indices__ -> Tensor v'2 t -- ^ __set_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __set_shape__ -> Tensor Build Data.Int.Int32 -- ^ __size__ setSize' op'options set_indices set_values set_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs set_indices, buildInputs set_values, buildInputs set_shape] return (opDef "SetSize" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "set_indices" type: DT_INT64 } input_arg { name: "set_values" type_attr: "T" } input_arg { name: "set_shape" type: DT_INT64 } output_arg { name: "size" type: DT_INT32 } attr { name: "validate_indices" type: "bool" default_value { b: true } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_UINT16 type: DT_STRING } } } -} -- | shape :: forall v'1 t out_type . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_type) => Tensor v'1 t -- ^ __input__ -> Tensor Build out_type -- ^ __output__ shape = shape' id shape' :: forall v'1 t out_type . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_type) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build out_type -- ^ __output__ shape' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Shape" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "out_type" } attr { name: "T" type: "type" } attr { name: "out_type" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | shapeN :: forall v'1 t out_type . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_type) => [Tensor v'1 t] -- ^ __input__ -> [Tensor Build out_type] -- ^ __output__ shapeN = shapeN' id shapeN' :: forall v'1 t out_type . 
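-- 'shape' above returns the dimensions of its input as a vector, with the
-- index width chosen by annotating __out_type__ as Int32 or Int64; 'size'
-- later in this module is the scalar element count instead. Sketch,
-- assuming 'constant' from TensorFlow.Ops:
--
-- > dims :: Tensor Build Data.Int.Int64
-- > dims = shape (constant (Shape [2, 3]) [1 .. 6 :: Float])
-- > -- dims evaluates to [2, 3]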
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_type) => OpParams -> [Tensor v'1 t] -- ^ __input__ -> [Tensor Build out_type] -- ^ __output__ shapeN' op'options input | eqLengthGuard [("N", [("input", length input)])] = pureOp [n] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "ShapeN" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "out_type" .~ tensorType (undefined :: out_type) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length input) :: Int64 {- input_arg { name: "input" type_attr: "T" number_attr: "N" } output_arg { name: "output" type_attr: "out_type" number_attr: "N" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } attr { name: "out_type" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | shardDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __num_shards__ -> Tensor v'3 Data.Int.Int64 -- ^ __index__ -> Tensor Build Variant -- ^ __handle__ shardDataset = shardDataset' id shardDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __num_shards__ -> Tensor v'3 Data.Int.Int64 -- ^ __index__ -> Tensor Build Variant -- ^ __handle__ shardDataset' op'options output_types input_dataset num_shards index | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs num_shards, buildInputs index] return (opDef "ShardDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "num_shards" type: DT_INT64 } input_arg { name: "index" type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "require_non_empty" type: "bool" default_value { b: false } } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | shardedFilename :: Tensor v'1 Data.ByteString.ByteString -- ^ __basename__ -> Tensor v'2 Data.Int.Int32 -- ^ __shard__ -> Tensor v'3 Data.Int.Int32 -- ^ __num_shards__ -> Tensor Build Data.ByteString.ByteString -- ^ __filename__ shardedFilename = shardedFilename' id shardedFilename' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __basename__ -> Tensor v'2 Data.Int.Int32 -- ^ __shard__ -> Tensor v'3 Data.Int.Int32 -- ^ __num_shards__ -> Tensor Build Data.ByteString.ByteString -- ^ __filename__ shardedFilename' op'options basename shard num_shards | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs basename, buildInputs shard, buildInputs num_shards] return (opDef "ShardedFilename" & op'options & opInputs .~ op'inputs) {- input_arg { name: "basename" type: DT_STRING } input_arg { name: "shard" type: DT_INT32 } input_arg { name: "num_shards" type: DT_INT32 } output_arg { name: "filename" type: DT_STRING } -} -- | shardedFilespec :: Tensor v'1 Data.ByteString.ByteString -- ^ __basename__ -> Tensor v'2 Data.Int.Int32 -- ^ __num_shards__ -> Tensor Build Data.ByteString.ByteString -- ^ __filename__ shardedFilespec = shardedFilespec' id shardedFilespec' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __basename__ -> Tensor v'2 Data.Int.Int32 -- ^ __num_shards__ -> Tensor Build 
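-- 'shardedFilename' above renders names of the form
-- @basename-00002-of-00010@ (shard index and shard count, zero-padded to
-- five digits), and 'shardedFilespec' renders the matching glob-style
-- pattern covering every shard of a basename.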
Data.ByteString.ByteString -- ^ __filename__ shardedFilespec' op'options basename num_shards | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs basename, buildInputs num_shards] return (opDef "ShardedFilespec" & op'options & opInputs .~ op'inputs) {- input_arg { name: "basename" type: DT_STRING } input_arg { name: "num_shards" type: DT_INT32 } output_arg { name: "filename" type: DT_STRING } -} -- | shuffleAndRepeatDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __buffer_size__ -> Tensor v'3 Data.Int.Int64 -- ^ __seed__ -> Tensor v'4 Data.Int.Int64 -- ^ __seed2__ -> Tensor v'5 Data.Int.Int64 -- ^ __count__ -> Tensor Build Variant -- ^ __handle__ shuffleAndRepeatDataset = shuffleAndRepeatDataset' id shuffleAndRepeatDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __buffer_size__ -> Tensor v'3 Data.Int.Int64 -- ^ __seed__ -> Tensor v'4 Data.Int.Int64 -- ^ __seed2__ -> Tensor v'5 Data.Int.Int64 -- ^ __count__ -> Tensor Build Variant -- ^ __handle__ shuffleAndRepeatDataset' op'options output_types input_dataset buffer_size seed seed2 count | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs buffer_size, buildInputs seed, buildInputs seed2, buildInputs count] return (opDef "ShuffleAndRepeatDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "buffer_size" type: DT_INT64 } input_arg { name: "seed" type: DT_INT64 } input_arg { name: "seed2" type: DT_INT64 } input_arg { name: "count" type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | shuffleDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __buffer_size__ -> Tensor v'3 Data.Int.Int64 -- ^ __seed__ -> Tensor v'4 Data.Int.Int64 -- ^ __seed2__ -> Tensor Build Variant -- ^ __handle__ shuffleDataset = shuffleDataset' id shuffleDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __buffer_size__ -> Tensor v'3 Data.Int.Int64 -- ^ __seed__ -> Tensor v'4 Data.Int.Int64 -- ^ __seed2__ -> Tensor Build Variant -- ^ __handle__ shuffleDataset' op'options output_types input_dataset buffer_size seed seed2 | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs buffer_size, buildInputs seed, buildInputs seed2] return (opDef "ShuffleDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "buffer_size" type: DT_INT64 } input_arg { name: "seed" type: DT_INT64 } input_arg { name: "seed2" type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "reshuffle_each_iteration" type: "bool" default_value { b: true } } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | shutdownDistributedTPU :: forall m' . 
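-- The dataset constructors here take only __output_types__ positionally;
-- the required __output_shapes__ attr and defaulted attrs such as
-- __reshuffle_each_iteration__ travel through the 'OpParams' argument of
-- the primed variants. A sketch, assuming the attr value types have the
-- 'Attribute' instances this module's @opAttr@ pattern requires:
--
-- > shuffled = shuffleDataset' (opAttr "reshuffle_each_iteration" .~ False)
-- >              outputTypes inputDataset bufferSize seed seed2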
(MonadBuild m') => m' (ControlNode) shutdownDistributedTPU = shutdownDistributedTPU' id shutdownDistributedTPU' :: forall m' . (MonadBuild m') => OpParams -> m' (ControlNode) shutdownDistributedTPU' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "ShutdownDistributedTPU" & op'options & opInputs .~ op'inputs) {- -} -- | sigmoid :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ sigmoid = sigmoid' id sigmoid' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ sigmoid' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Sigmoid" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | sigmoidGrad :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __y__ -> Tensor v'2 t -- ^ __dy__ -> Tensor Build t -- ^ __z__ sigmoidGrad = sigmoidGrad' id sigmoidGrad' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __y__ -> Tensor v'2 t -- ^ __dy__ -> Tensor Build t -- ^ __z__ sigmoidGrad' op'options y dy | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs y, buildInputs dy] return (opDef "SigmoidGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "y" type_attr: "T" } input_arg { name: "dy" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | sign :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ sign = sign' id sign' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ sign' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Sign" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | sin :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ sin = sin' id sin' :: forall v'1 t . 
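-- For reference: 'sigmoidGrad' above computes dy * y * (1 - y), the
-- sigmoid derivative expressed in terms of the op's output y rather than
-- its input, and 'sign' returns -1, 0, or 1 (and y = x / |x| for nonzero
-- complex x).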
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ sin' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Sin" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | sinh :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ sinh = sinh' id sinh' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ sinh' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Sinh" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | size :: forall v'1 t out_type . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_type) => Tensor v'1 t -- ^ __input__ -> Tensor Build out_type -- ^ __output__ size = size' id size' :: forall v'1 t out_type . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_type) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build out_type -- ^ __output__ size' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Size" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "out_type" } attr { name: "T" type: "type" } attr { name: "out_type" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | skipDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __count__ -> Tensor Build Variant -- ^ __handle__ skipDataset = skipDataset' id skipDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __count__ -> Tensor Build Variant -- ^ __handle__ skipDataset' op'options output_types input_dataset count | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs count] return (opDef "SkipDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "count" type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | skipgram :: forall m' . 
(MonadBuild m') => Data.Int.Int64 -- ^ __batch_size__ -> m' ((Tensor Value Data.ByteString.ByteString, Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int64, Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int64, Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int32)) -- ^ (__vocab_word__, __vocab_freq__, __words_per_epoch__, __current_epoch__, __total_words_processed__, __examples__, __labels__) -- -- * __vocab_word__ -- -- * __vocab_freq__ -- -- * __words_per_epoch__ -- -- * __current_epoch__ -- -- * __total_words_processed__ -- -- * __examples__ -- -- * __labels__ skipgram = skipgram' id skipgram' :: forall m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __batch_size__ -> m' ((Tensor Value Data.ByteString.ByteString, Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int64, Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int64, Tensor Value Data.Int.Int32, Tensor Value Data.Int.Int32)) -- ^ (__vocab_word__, __vocab_freq__, __words_per_epoch__, __current_epoch__, __total_words_processed__, __examples__, __labels__) -- -- * __vocab_word__ -- -- * __vocab_freq__ -- -- * __words_per_epoch__ -- -- * __current_epoch__ -- -- * __total_words_processed__ -- -- * __examples__ -- -- * __labels__ skipgram' op'options batch_size | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "Skipgram" & opAttr "batch_size" .~ batch_size & op'options & opInputs .~ op'inputs) {- output_arg { name: "vocab_word" type: DT_STRING } output_arg { name: "vocab_freq" type: DT_INT32 } output_arg { name: "words_per_epoch" type: DT_INT64 } output_arg { name: "current_epoch" type: DT_INT32 } output_arg { name: "total_words_processed" type: DT_INT64 } output_arg { name: "examples" type: DT_INT32 } output_arg { name: "labels" type: DT_INT32 } attr { name: "filename" type: "string" } attr { name: "batch_size" type: "int" } attr { name: "window_size" type: "int" default_value { i: 5 } } attr { name: "min_count" type: "int" default_value { i: 5 } } attr { name: "subsample" type: "float" default_value { f: 1.0e-3 } } -} -- | slice :: forall v'1 v'2 v'3 t index . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] index) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 index -- ^ __begin__ -> Tensor v'3 index -- ^ __size__ -> Tensor Build t -- ^ __output__ slice = slice' id slice' :: forall v'1 v'2 v'3 t index . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] index) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 index -- ^ __begin__ -> Tensor v'3 index -- ^ __size__ -> Tensor Build t -- ^ __output__ slice' op'options input begin size | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs begin, buildInputs size] return (opDef "Slice" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Index" .~ tensorType (undefined :: index) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "begin" type_attr: "Index" } input_arg { name: "size" type_attr: "Index" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Index" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | snapshot :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ snapshot = snapshot' id snapshot' :: forall v'1 t . 
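-- 'slice' above extracts a contiguous block: __begin__ gives the starting
-- offset in each dimension and __size__ the extent (with -1 meaning "to
-- the end" in TensorFlow's convention). Sketch, assuming 'constant' from
-- TensorFlow.Ops:
--
-- > -- elements 1 and 2 of a length-4 vector  ==>  [20, 30]
-- > middle :: Tensor Build Float
-- > middle = slice (constant (Shape [4]) [10, 20, 30, 40])
-- >                (constant (Shape [1]) [1 :: Data.Int.Int32])
-- >                (constant (Shape [1]) [2 :: Data.Int.Int32])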
(TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ snapshot' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Snapshot" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } -} -- | snapshotDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __path__ -> Tensor Build Variant -- ^ __handle__ snapshotDataset = snapshotDataset' id snapshotDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __path__ -> Tensor Build Variant -- ^ __handle__ snapshotDataset' op'options output_types input_dataset path | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs path] return (opDef "SnapshotDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "path" type: DT_STRING } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | softmax :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __logits__ -> Tensor Build t -- ^ __softmax__ softmax = softmax' id softmax' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __logits__ -> Tensor Build t -- ^ __softmax__ softmax' op'options logits | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs logits] return (opDef "Softmax" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "logits" type_attr: "T" } output_arg { name: "softmax" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | softmaxCrossEntropyWithLogits :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __features__ -> Tensor v'2 t -- ^ __labels__ -> (Tensor Build t, Tensor Build t) -- ^ (__loss__, __backprop__) -- -- * __loss__ -- -- * __backprop__ softmaxCrossEntropyWithLogits = softmaxCrossEntropyWithLogits' id softmaxCrossEntropyWithLogits' :: forall v'1 v'2 t . 
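-- For reference: 'softmax' above computes, independently per batch row,
-- softmax[i] = exp(logits[i]) / sum_j exp(logits[j]) over the last
-- dimension, and 'softmaxCrossEntropyWithLogits' fuses it with the
-- cross-entropy loss, returning the per-example loss together with the
-- backpropagated gradient (which works out to softmax - labels).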
(OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __features__ -> Tensor v'2 t -- ^ __labels__ -> (Tensor Build t, Tensor Build t) -- ^ (__loss__, __backprop__) -- -- * __loss__ -- -- * __backprop__ softmaxCrossEntropyWithLogits' op'options features labels | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features, buildInputs labels] return (opDef "SoftmaxCrossEntropyWithLogits" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "features" type_attr: "T" } input_arg { name: "labels" type_attr: "T" } output_arg { name: "loss" type_attr: "T" } output_arg { name: "backprop" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | softplus :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __features__ -> Tensor Build t -- ^ __activations__ softplus = softplus' id softplus' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __features__ -> Tensor Build t -- ^ __activations__ softplus' op'options features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features] return (opDef "Softplus" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "features" type_attr: "T" } output_arg { name: "activations" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | softplusGrad :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __gradients__ -> Tensor v'2 t -- ^ __features__ -> Tensor Build t -- ^ __backprops__ softplusGrad = softplusGrad' id softplusGrad' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __gradients__ -> Tensor v'2 t -- ^ __features__ -> Tensor Build t -- ^ __backprops__ softplusGrad' op'options gradients features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients, buildInputs features] return (opDef "SoftplusGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "gradients" type_attr: "T" } input_arg { name: "features" type_attr: "T" } output_arg { name: "backprops" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | softsign :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __features__ -> Tensor Build t -- ^ __activations__ softsign = softsign' id softsign' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __features__ -> Tensor Build t -- ^ __activations__ softsign' op'options features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features] return (opDef "Softsign" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "features" type_attr: "T" } output_arg { name: "activations" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | softsignGrad :: forall v'1 v'2 t . 
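-- For reference: 'softplus' above is log(1 + exp(x)), a smooth relu
-- approximation, and 'softsign' is x / (1 + |x|). Their *Grad companions
-- take (gradients, features) and apply the chain rule; e.g. 'softplusGrad'
-- yields gradients * sigmoid(features), since
-- d/dx log(1 + exp(x)) = 1 / (1 + exp(-x)).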
(OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __gradients__ -> Tensor v'2 t -- ^ __features__ -> Tensor Build t -- ^ __backprops__ softsignGrad = softsignGrad' id softsignGrad' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __gradients__ -> Tensor v'2 t -- ^ __features__ -> Tensor Build t -- ^ __backprops__ softsignGrad' op'options gradients features | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs gradients, buildInputs features] return (opDef "SoftsignGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "gradients" type_attr: "T" } input_arg { name: "features" type_attr: "T" } output_arg { name: "backprops" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } -} -- | spaceToBatch :: forall v'1 v'2 t tpaddings . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tpaddings) => Data.Int.Int64 -- ^ __block_size__ -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tpaddings -- ^ __paddings__ -> Tensor Build t -- ^ __output__ spaceToBatch = spaceToBatch' id spaceToBatch' :: forall v'1 v'2 t tpaddings . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tpaddings) => OpParams -> Data.Int.Int64 -- ^ __block_size__ -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tpaddings -- ^ __paddings__ -> Tensor Build t -- ^ __output__ spaceToBatch' op'options block_size input paddings | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs paddings] return (opDef "SpaceToBatch" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings) & opAttr "block_size" .~ block_size & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "paddings" type_attr: "Tpaddings" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tpaddings" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "block_size" type: "int" has_minimum: true minimum: 2 } -} -- | spaceToBatchND :: forall v'1 v'2 v'3 t tblock_shape tpaddings . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tblock_shape, OneOf '[Data.Int.Int32, Data.Int.Int64] tpaddings) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 tblock_shape -- ^ __block_shape__ -> Tensor v'3 tpaddings -- ^ __paddings__ -> Tensor Build t -- ^ __output__ spaceToBatchND = spaceToBatchND' id spaceToBatchND' :: forall v'1 v'2 v'3 t tblock_shape tpaddings . 
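-- Shape arithmetic for the op above: with block size b, 'spaceToBatch'
-- maps a (padded) NHWC input of shape [n, h, w, c] to
-- [n * b * b, h / b, w / b, c], moving spatial blocks into the batch
-- dimension; 'spaceToBatchND' generalizes this to arbitrary block shapes
-- and ranks.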
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tblock_shape, OneOf '[Data.Int.Int32, Data.Int.Int64] tpaddings) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tblock_shape -- ^ __block_shape__ -> Tensor v'3 tpaddings -- ^ __paddings__ -> Tensor Build t -- ^ __output__ spaceToBatchND' op'options input block_shape paddings | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs block_shape, buildInputs paddings] return (opDef "SpaceToBatchND" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tblock_shape" .~ tensorType (undefined :: tblock_shape) & opAttr "Tpaddings" .~ tensorType (undefined :: tpaddings) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "block_shape" type_attr: "Tblock_shape" } input_arg { name: "paddings" type_attr: "Tpaddings" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tblock_shape" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "Tpaddings" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | spaceToDepth :: forall v'1 t . (TensorType t) => Data.Int.Int64 -- ^ __block_size__ -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ spaceToDepth = spaceToDepth' id spaceToDepth' :: forall v'1 t . (TensorType t) => OpParams -> Data.Int.Int64 -- ^ __block_size__ -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ spaceToDepth' op'options block_size input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "SpaceToDepth" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "block_size" .~ block_size & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "block_size" type: "int" has_minimum: true minimum: 2 } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" s: "NCHW_VECT_C" } } } -} -- | sparseAccumulatorApplyGradient :: forall v'2 v'3 v'4 v'5 dtype m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] dtype) => Bool -- ^ __has_known_shape__ -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int64 -- ^ __local_step__ -> Tensor v'3 Data.Int.Int64 -- ^ __gradient_indices__ -> Tensor v'4 dtype -- ^ __gradient_values__ -> Tensor v'5 Data.Int.Int64 -- ^ __gradient_shape__ -> m' (ControlNode) sparseAccumulatorApplyGradient = sparseAccumulatorApplyGradient' id sparseAccumulatorApplyGradient' :: forall v'2 v'3 v'4 v'5 dtype m' . 
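-- By contrast, 'spaceToDepth' above with block size b maps an NHWC input
-- of shape [n, h, w, c] to [n, h / b, w / b, c * b * b], folding each
-- spatial block into the channel dimension rather than the batch
-- dimension.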
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] dtype) => OpParams -> Bool -- ^ __has_known_shape__ -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int64 -- ^ __local_step__ -> Tensor v'3 Data.Int.Int64 -- ^ __gradient_indices__ -> Tensor v'4 dtype -- ^ __gradient_values__ -> Tensor v'5 Data.Int.Int64 -- ^ __gradient_shape__ -> m' (ControlNode) sparseAccumulatorApplyGradient' op'options has_known_shape handle local_step gradient_indices gradient_values gradient_shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs local_step, buildInputs gradient_indices, buildInputs gradient_values, buildInputs gradient_shape] buildOp [] (opDef "SparseAccumulatorApplyGradient" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "has_known_shape" .~ has_known_shape & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "local_step" type: DT_INT64 } input_arg { name: "gradient_indices" type: DT_INT64 } input_arg { name: "gradient_values" type_attr: "dtype" } input_arg { name: "gradient_shape" type: DT_INT64 } attr { name: "dtype" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "has_known_shape" type: "bool" } -} -- | sparseAccumulatorTakeGradient :: forall v'2 dtype m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] dtype) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __num_required__ -> m' ((Tensor Value Data.Int.Int64, Tensor Value dtype, Tensor Value Data.Int.Int64)) -- ^ (__indices__, __values__, __shape__) -- -- * __indices__ -- -- * __values__ -- -- * __shape__ sparseAccumulatorTakeGradient = sparseAccumulatorTakeGradient' id sparseAccumulatorTakeGradient' :: forall v'2 dtype m' . 
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] dtype) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __num_required__ -> m' ((Tensor Value Data.Int.Int64, Tensor Value dtype, Tensor Value Data.Int.Int64)) -- ^ (__indices__, __values__, __shape__) -- -- * __indices__ -- -- * __values__ -- -- * __shape__ sparseAccumulatorTakeGradient' op'options handle num_required | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs num_required] buildOp [] (opDef "SparseAccumulatorTakeGradient" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "num_required" type: DT_INT32 } output_arg { name: "indices" type: DT_INT64 } output_arg { name: "values" type_attr: "dtype" } output_arg { name: "shape" type: DT_INT64 } attr { name: "dtype" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | sparseAdd :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t treal . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] treal) => Tensor v'1 Data.Int.Int64 -- ^ __a_indices__ -> Tensor v'2 t -- ^ __a_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __a_shape__ -> Tensor v'4 Data.Int.Int64 -- ^ __b_indices__ -> Tensor v'5 t -- ^ __b_values__ -> Tensor v'6 Data.Int.Int64 -- ^ __b_shape__ -> Tensor v'7 treal -- ^ __thresh__ -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__sum_indices__, __sum_values__, __sum_shape__) -- -- * __sum_indices__ -- -- * __sum_values__ -- -- * __sum_shape__ sparseAdd = sparseAdd' id sparseAdd' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t treal . 
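-- In 'sparseAdd' above, __thresh__ has the real-valued magnitude type
-- __treal__ matching __t__, and it filters the result: output entries
-- whose magnitude falls below thresh are dropped, so near-cancellations
-- do not take space in the sparse sum.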
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] treal) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __a_indices__ -> Tensor v'2 t -- ^ __a_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __a_shape__ -> Tensor v'4 Data.Int.Int64 -- ^ __b_indices__ -> Tensor v'5 t -- ^ __b_values__ -> Tensor v'6 Data.Int.Int64 -- ^ __b_shape__ -> Tensor v'7 treal -- ^ __thresh__ -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__sum_indices__, __sum_values__, __sum_shape__) -- -- * __sum_indices__ -- -- * __sum_values__ -- -- * __sum_shape__ sparseAdd' op'options a_indices a_values a_shape b_indices b_values b_shape thresh | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a_indices, buildInputs a_values, buildInputs a_shape, buildInputs b_indices, buildInputs b_values, buildInputs b_shape, buildInputs thresh] return (opDef "SparseAdd" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Treal" .~ tensorType (undefined :: treal) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a_indices" type: DT_INT64 } input_arg { name: "a_values" type_attr: "T" } input_arg { name: "a_shape" type: DT_INT64 } input_arg { name: "b_indices" type: DT_INT64 } input_arg { name: "b_values" type_attr: "T" } input_arg { name: "b_shape" type: DT_INT64 } input_arg { name: "thresh" type_attr: "Treal" } output_arg { name: "sum_indices" type: DT_INT64 } output_arg { name: "sum_values" type_attr: "T" } output_arg { name: "sum_shape" type: DT_INT64 } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Treal" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | sparseAddGrad :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __backprop_val_grad__ -> Tensor v'2 Data.Int.Int64 -- ^ __a_indices__ -> Tensor v'3 Data.Int.Int64 -- ^ __b_indices__ -> Tensor v'4 Data.Int.Int64 -- ^ __sum_indices__ -> (Tensor Build t, Tensor Build t) -- ^ (__a_val_grad__, __b_val_grad__) -- -- * __a_val_grad__ -- -- * __b_val_grad__ sparseAddGrad = sparseAddGrad' id sparseAddGrad' :: forall v'1 v'2 v'3 v'4 t . 
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __backprop_val_grad__ -> Tensor v'2 Data.Int.Int64 -- ^ __a_indices__ -> Tensor v'3 Data.Int.Int64 -- ^ __b_indices__ -> Tensor v'4 Data.Int.Int64 -- ^ __sum_indices__ -> (Tensor Build t, Tensor Build t) -- ^ (__a_val_grad__, __b_val_grad__) -- -- * __a_val_grad__ -- -- * __b_val_grad__ sparseAddGrad' op'options backprop_val_grad a_indices b_indices sum_indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs backprop_val_grad, buildInputs a_indices, buildInputs b_indices, buildInputs sum_indices] return (opDef "SparseAddGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "backprop_val_grad" type_attr: "T" } input_arg { name: "a_indices" type: DT_INT64 } input_arg { name: "b_indices" type: DT_INT64 } input_arg { name: "sum_indices" type: DT_INT64 } output_arg { name: "a_val_grad" type_attr: "T" } output_arg { name: "b_val_grad" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | sparseApplyAdadelta :: forall v'4 v'5 v'6 v'7 v'8 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __var__ -> Tensor Ref t -- ^ __accum__ -> Tensor Ref t -- ^ __accum_update__ -> Tensor v'4 t -- ^ __lr__ -> Tensor v'5 t -- ^ __rho__ -> Tensor v'6 t -- ^ __epsilon__ -> Tensor v'7 t -- ^ __grad__ -> Tensor v'8 tindices -- ^ __indices__ -> m' (Tensor Ref t) -- ^ __out__ sparseApplyAdadelta = sparseApplyAdadelta' id sparseApplyAdadelta' :: forall v'4 v'5 v'6 v'7 v'8 t tindices m' . 
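-- Update rule applied by the op above (restricted to the rows selected by
-- __indices__), following TensorFlow's Adadelta:
--
-- > accum        = rho * accum + (1 - rho) * grad^2
-- > update       = sqrt(accum_update + epsilon)
-- >              / sqrt(accum + epsilon) * grad
-- > accum_update = rho * accum_update + (1 - rho) * update^2
-- > var          = var - lr * update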
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __var__ -> Tensor Ref t -- ^ __accum__ -> Tensor Ref t -- ^ __accum_update__ -> Tensor v'4 t -- ^ __lr__ -> Tensor v'5 t -- ^ __rho__ -> Tensor v'6 t -- ^ __epsilon__ -> Tensor v'7 t -- ^ __grad__ -> Tensor v'8 tindices -- ^ __indices__ -> m' (Tensor Ref t) -- ^ __out__ sparseApplyAdadelta' op'options var accum accum_update lr rho epsilon grad indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs accum_update, buildInputs lr, buildInputs rho, buildInputs epsilon, buildInputs grad, buildInputs indices] buildOp [] (opDef "SparseApplyAdadelta" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type_attr: "T" is_ref: true } input_arg { name: "accum" type_attr: "T" is_ref: true } input_arg { name: "accum_update" type_attr: "T" is_ref: true } input_arg { name: "lr" type_attr: "T" } input_arg { name: "rho" type_attr: "T" } input_arg { name: "epsilon" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } output_arg { name: "out" type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | sparseApplyAdagrad :: forall v'3 v'4 v'5 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __var__ -> Tensor Ref t -- ^ __accum__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 tindices -- ^ __indices__ -> m' (Tensor Ref t) -- ^ __out__ sparseApplyAdagrad = sparseApplyAdagrad' id sparseApplyAdagrad' :: forall v'3 v'4 v'5 t tindices m' . 
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __var__ -> Tensor Ref t -- ^ __accum__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 tindices -- ^ __indices__ -> m' (Tensor Ref t) -- ^ __out__ sparseApplyAdagrad' op'options var accum lr grad indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs lr, buildInputs grad, buildInputs indices] buildOp [] (opDef "SparseApplyAdagrad" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type_attr: "T" is_ref: true } input_arg { name: "accum" type_attr: "T" is_ref: true } input_arg { name: "lr" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } output_arg { name: "out" type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } attr { name: "update_slots" type: "bool" default_value { b: true } } -} -- | sparseApplyAdagradDA :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __var__ -> Tensor Ref t -- ^ __gradient_accumulator__ -> Tensor Ref t -- ^ __gradient_squared_accumulator__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 tindices -- ^ __indices__ -> Tensor v'6 t -- ^ __lr__ -> Tensor v'7 t -- ^ __l1__ -> Tensor v'8 t -- ^ __l2__ -> Tensor v'9 Data.Int.Int64 -- ^ __global_step__ -> m' (Tensor Ref t) -- ^ __out__ sparseApplyAdagradDA = sparseApplyAdagradDA' id sparseApplyAdagradDA' :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices m' . 
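{- Usage sketch (hand-written). 'sparseApplyAdagrad' performs, for each row i listed in indices: accum[i] += grad^2 and var[i] -= lr * grad / sqrt(accum[i]); rows not listed are untouched. 'TF.initializedVariable', 'TF.constant' and 'TF.scalar' are assumed from the companion tensorflow-ops package:

import Data.Int (Int32)
import qualified Data.Vector as V
import qualified TensorFlow.Core as TF
import qualified TensorFlow.Ops as TF

adagradStep :: TF.Session (V.Vector Float)
adagradStep = do
  var   <- TF.initializedVariable (TF.constant (TF.Shape [4]) [1, 1, 1, 1 :: Float])
  accum <- TF.initializedVariable (TF.constant (TF.Shape [4]) [0.1, 0.1, 0.1, 0.1 :: Float])
  let lr   = TF.scalar (0.01 :: Float)
      grad = TF.constant (TF.Shape [2]) [0.5, -0.5 :: Float]  -- gradients for two rows...
      ixs  = TF.constant (TF.Shape [2]) [0, 3 :: Int32]       -- ...rows 0 and 3 of var
  out <- sparseApplyAdagrad var accum lr grad ixs
  TF.run out  -- fetches the updated var; rows 1 and 2 are unchanged
-}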
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __var__ -> Tensor Ref t -- ^ __gradient_accumulator__ -> Tensor Ref t -- ^ __gradient_squared_accumulator__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 tindices -- ^ __indices__ -> Tensor v'6 t -- ^ __lr__ -> Tensor v'7 t -- ^ __l1__ -> Tensor v'8 t -- ^ __l2__ -> Tensor v'9 Data.Int.Int64 -- ^ __global_step__ -> m' (Tensor Ref t) -- ^ __out__ sparseApplyAdagradDA' op'options var gradient_accumulator gradient_squared_accumulator grad indices lr l1 l2 global_step | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs gradient_accumulator, buildInputs gradient_squared_accumulator, buildInputs grad, buildInputs indices, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs global_step] buildOp [] (opDef "SparseApplyAdagradDA" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type_attr: "T" is_ref: true } input_arg { name: "gradient_accumulator" type_attr: "T" is_ref: true } input_arg { name: "gradient_squared_accumulator" type_attr: "T" is_ref: true } input_arg { name: "grad" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "lr" type_attr: "T" } input_arg { name: "l1" type_attr: "T" } input_arg { name: "l2" type_attr: "T" } input_arg { name: "global_step" type: DT_INT64 } output_arg { name: "out" type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | sparseApplyCenteredRMSProp :: forall v'5 v'6 v'7 v'8 v'9 v'10 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __var__ -> Tensor Ref t -- ^ __mg__ -> Tensor Ref t -- ^ __ms__ -> Tensor Ref t -- ^ __mom__ -> Tensor v'5 t -- ^ __lr__ -> Tensor v'6 t -- ^ __rho__ -> Tensor v'7 t -- ^ __momentum__ -> Tensor v'8 t -- ^ __epsilon__ -> Tensor v'9 t -- ^ __grad__ -> Tensor v'10 tindices -- ^ __indices__ -> m' (Tensor Ref t) -- ^ __out__ sparseApplyCenteredRMSProp = sparseApplyCenteredRMSProp' id sparseApplyCenteredRMSProp' :: forall v'5 v'6 v'7 v'8 v'9 v'10 t tindices m' . 
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __var__ -> Tensor Ref t -- ^ __mg__ -> Tensor Ref t -- ^ __ms__ -> Tensor Ref t -- ^ __mom__ -> Tensor v'5 t -- ^ __lr__ -> Tensor v'6 t -- ^ __rho__ -> Tensor v'7 t -- ^ __momentum__ -> Tensor v'8 t -- ^ __epsilon__ -> Tensor v'9 t -- ^ __grad__ -> Tensor v'10 tindices -- ^ __indices__ -> m' (Tensor Ref t) -- ^ __out__ sparseApplyCenteredRMSProp' op'options var mg ms mom lr rho momentum epsilon grad indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs mg, buildInputs ms, buildInputs mom, buildInputs lr, buildInputs rho, buildInputs momentum, buildInputs epsilon, buildInputs grad, buildInputs indices] buildOp [] (opDef "SparseApplyCenteredRMSProp" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type_attr: "T" is_ref: true } input_arg { name: "mg" type_attr: "T" is_ref: true } input_arg { name: "ms" type_attr: "T" is_ref: true } input_arg { name: "mom" type_attr: "T" is_ref: true } input_arg { name: "lr" type_attr: "T" } input_arg { name: "rho" type_attr: "T" } input_arg { name: "momentum" type_attr: "T" } input_arg { name: "epsilon" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } output_arg { name: "out" type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | sparseApplyFtrl :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __var__ -> Tensor Ref t -- ^ __accum__ -> Tensor Ref t -- ^ __linear__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 tindices -- ^ __indices__ -> Tensor v'6 t -- ^ __lr__ -> Tensor v'7 t -- ^ __l1__ -> Tensor v'8 t -- ^ __l2__ -> Tensor v'9 t -- ^ __lr_power__ -> m' (Tensor Ref t) -- ^ __out__ sparseApplyFtrl = sparseApplyFtrl' id sparseApplyFtrl' :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices m' . 
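{- Semantics note (hand-written, paraphrasing the upstream TensorFlow op documentation). For each row i in indices, 'sparseApplyCenteredRMSProp' maintains a mean-gradient estimate mg alongside the usual second-moment estimate ms:

  mg[i]  = rho * mg[i] + (1 - rho) * grad
  ms[i]  = rho * ms[i] + (1 - rho) * grad * grad
  mom[i] = momentum * mom[i] + lr * grad / sqrt(ms[i] - mg[i]^2 + epsilon)
  var[i] = var[i] - mom[i]

The centering term mg[i]^2 makes the denominator an estimate of the gradient's variance rather than of its raw second moment.
-}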
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __var__ -> Tensor Ref t -- ^ __accum__ -> Tensor Ref t -- ^ __linear__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 tindices -- ^ __indices__ -> Tensor v'6 t -- ^ __lr__ -> Tensor v'7 t -- ^ __l1__ -> Tensor v'8 t -- ^ __l2__ -> Tensor v'9 t -- ^ __lr_power__ -> m' (Tensor Ref t) -- ^ __out__ sparseApplyFtrl' op'options var accum linear grad indices lr l1 l2 lr_power | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs linear, buildInputs grad, buildInputs indices, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs lr_power] buildOp [] (opDef "SparseApplyFtrl" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type_attr: "T" is_ref: true } input_arg { name: "accum" type_attr: "T" is_ref: true } input_arg { name: "linear" type_attr: "T" is_ref: true } input_arg { name: "grad" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "lr" type_attr: "T" } input_arg { name: "l1" type_attr: "T" } input_arg { name: "l2" type_attr: "T" } input_arg { name: "lr_power" type_attr: "T" } output_arg { name: "out" type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | sparseApplyFtrlV2 :: forall v'4 v'5 v'6 v'7 v'8 v'9 v'10 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __var__ -> Tensor Ref t -- ^ __accum__ -> Tensor Ref t -- ^ __linear__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 tindices -- ^ __indices__ -> Tensor v'6 t -- ^ __lr__ -> Tensor v'7 t -- ^ __l1__ -> Tensor v'8 t -- ^ __l2__ -> Tensor v'9 t -- ^ __l2_shrinkage__ -> Tensor v'10 t -- ^ __lr_power__ -> m' (Tensor Ref t) -- ^ __out__ sparseApplyFtrlV2 = sparseApplyFtrlV2' id sparseApplyFtrlV2' :: forall v'4 v'5 v'6 v'7 v'8 v'9 v'10 t tindices m' . 
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __var__ -> Tensor Ref t -- ^ __accum__ -> Tensor Ref t -- ^ __linear__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 tindices -- ^ __indices__ -> Tensor v'6 t -- ^ __lr__ -> Tensor v'7 t -- ^ __l1__ -> Tensor v'8 t -- ^ __l2__ -> Tensor v'9 t -- ^ __l2_shrinkage__ -> Tensor v'10 t -- ^ __lr_power__ -> m' (Tensor Ref t) -- ^ __out__ sparseApplyFtrlV2' op'options var accum linear grad indices lr l1 l2 l2_shrinkage lr_power | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs linear, buildInputs grad, buildInputs indices, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs l2_shrinkage, buildInputs lr_power] buildOp [] (opDef "SparseApplyFtrlV2" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type_attr: "T" is_ref: true } input_arg { name: "accum" type_attr: "T" is_ref: true } input_arg { name: "linear" type_attr: "T" is_ref: true } input_arg { name: "grad" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "lr" type_attr: "T" } input_arg { name: "l1" type_attr: "T" } input_arg { name: "l2" type_attr: "T" } input_arg { name: "l2_shrinkage" type_attr: "T" } input_arg { name: "lr_power" type_attr: "T" } output_arg { name: "out" type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | sparseApplyMomentum :: forall v'3 v'4 v'5 v'6 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __var__ -> Tensor Ref t -- ^ __accum__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 tindices -- ^ __indices__ -> Tensor v'6 t -- ^ __momentum__ -> m' (Tensor Ref t) -- ^ __out__ sparseApplyMomentum = sparseApplyMomentum' id sparseApplyMomentum' :: forall v'3 v'4 v'5 v'6 t tindices m' . 
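{- Variant note (hand-written). 'sparseApplyFtrlV2' differs from 'sparseApplyFtrl' only by the extra 'l2_shrinkage' input: 'l2' remains the stabilization penalty applied inside the accumulator update, while 'l2_shrinkage' adds an L2 shrinkage penalty on the weights to the gradient before the update. With l2_shrinkage set to zero the two ops should agree:

  out  <- sparseApplyFtrl   var accum linear grad ixs lr l1 l2 lrPower
  out' <- sparseApplyFtrlV2 var accum linear grad ixs lr l1 l2 (TF.scalar 0) lrPower
-}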
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __var__ -> Tensor Ref t -- ^ __accum__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __grad__ -> Tensor v'5 tindices -- ^ __indices__ -> Tensor v'6 t -- ^ __momentum__ -> m' (Tensor Ref t) -- ^ __out__ sparseApplyMomentum' op'options var accum lr grad indices momentum | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs lr, buildInputs grad, buildInputs indices, buildInputs momentum] buildOp [] (opDef "SparseApplyMomentum" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type_attr: "T" is_ref: true } input_arg { name: "accum" type_attr: "T" is_ref: true } input_arg { name: "lr" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "momentum" type_attr: "T" } output_arg { name: "out" type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } attr { name: "use_nesterov" type: "bool" default_value { b: false } } -} -- | sparseApplyProximalAdagrad :: forall v'3 v'4 v'5 v'6 v'7 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __var__ -> Tensor Ref t -- ^ __accum__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __l1__ -> Tensor v'5 t -- ^ __l2__ -> Tensor v'6 t -- ^ __grad__ -> Tensor v'7 tindices -- ^ __indices__ -> m' (Tensor Ref t) -- ^ __out__ sparseApplyProximalAdagrad = sparseApplyProximalAdagrad' id sparseApplyProximalAdagrad' :: forall v'3 v'4 v'5 v'6 v'7 t tindices m' . 
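{- Usage sketch (hand-written). 'sparseApplyMomentum' updates accum = accum * momentum + grad and then var -= lr * accum for the indexed rows. Nesterov momentum is an op attribute rather than an input, so it is toggled through the primed variant's OpParams argument; a caller needs the lens setter from Lens.Family2:

import Lens.Family2 ((.~))

  out <- sparseApplyMomentum' (opAttr "use_nesterov" .~ True)
                              var accum lr grad ixs momentum
-}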
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __var__ -> Tensor Ref t -- ^ __accum__ -> Tensor v'3 t -- ^ __lr__ -> Tensor v'4 t -- ^ __l1__ -> Tensor v'5 t -- ^ __l2__ -> Tensor v'6 t -- ^ __grad__ -> Tensor v'7 tindices -- ^ __indices__ -> m' (Tensor Ref t) -- ^ __out__ sparseApplyProximalAdagrad' op'options var accum lr l1 l2 grad indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs accum, buildInputs lr, buildInputs l1, buildInputs l2, buildInputs grad, buildInputs indices] buildOp [] (opDef "SparseApplyProximalAdagrad" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type_attr: "T" is_ref: true } input_arg { name: "accum" type_attr: "T" is_ref: true } input_arg { name: "lr" type_attr: "T" } input_arg { name: "l1" type_attr: "T" } input_arg { name: "l2" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } output_arg { name: "out" type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | sparseApplyProximalGradientDescent :: forall v'2 v'3 v'4 v'5 v'6 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __var__ -> Tensor v'2 t -- ^ __alpha__ -> Tensor v'3 t -- ^ __l1__ -> Tensor v'4 t -- ^ __l2__ -> Tensor v'5 t -- ^ __grad__ -> Tensor v'6 tindices -- ^ __indices__ -> m' (Tensor Ref t) -- ^ __out__ sparseApplyProximalGradientDescent = sparseApplyProximalGradientDescent' id sparseApplyProximalGradientDescent' :: forall v'2 v'3 v'4 v'5 v'6 t tindices m' . 
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __var__ -> Tensor v'2 t -- ^ __alpha__ -> Tensor v'3 t -- ^ __l1__ -> Tensor v'4 t -- ^ __l2__ -> Tensor v'5 t -- ^ __grad__ -> Tensor v'6 tindices -- ^ __indices__ -> m' (Tensor Ref t) -- ^ __out__ sparseApplyProximalGradientDescent' op'options var alpha l1 l2 grad indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs alpha, buildInputs l1, buildInputs l2, buildInputs grad, buildInputs indices] buildOp [] (opDef "SparseApplyProximalGradientDescent" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type_attr: "T" is_ref: true } input_arg { name: "alpha" type_attr: "T" } input_arg { name: "l1" type_attr: "T" } input_arg { name: "l2" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } output_arg { name: "out" type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | sparseApplyRMSProp :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices m' . (MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor Ref t -- ^ __var__ -> Tensor Ref t -- ^ __ms__ -> Tensor Ref t -- ^ __mom__ -> Tensor v'4 t -- ^ __lr__ -> Tensor v'5 t -- ^ __rho__ -> Tensor v'6 t -- ^ __momentum__ -> Tensor v'7 t -- ^ __epsilon__ -> Tensor v'8 t -- ^ __grad__ -> Tensor v'9 tindices -- ^ __indices__ -> m' (Tensor Ref t) -- ^ __out__ sparseApplyRMSProp = sparseApplyRMSProp' id sparseApplyRMSProp' :: forall v'4 v'5 v'6 v'7 v'8 v'9 t tindices m' . 
(MonadBuild m', OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor Ref t -- ^ __var__ -> Tensor Ref t -- ^ __ms__ -> Tensor Ref t -- ^ __mom__ -> Tensor v'4 t -- ^ __lr__ -> Tensor v'5 t -- ^ __rho__ -> Tensor v'6 t -- ^ __momentum__ -> Tensor v'7 t -- ^ __epsilon__ -> Tensor v'8 t -- ^ __grad__ -> Tensor v'9 tindices -- ^ __indices__ -> m' (Tensor Ref t) -- ^ __out__ sparseApplyRMSProp' op'options var ms mom lr rho momentum epsilon grad indices | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs var, buildInputs ms, buildInputs mom, buildInputs lr, buildInputs rho, buildInputs momentum, buildInputs epsilon, buildInputs grad, buildInputs indices] buildOp [] (opDef "SparseApplyRMSProp" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "var" type_attr: "T" is_ref: true } input_arg { name: "ms" type_attr: "T" is_ref: true } input_arg { name: "mom" type_attr: "T" is_ref: true } input_arg { name: "lr" type_attr: "T" } input_arg { name: "rho" type_attr: "T" } input_arg { name: "momentum" type_attr: "T" } input_arg { name: "epsilon" type_attr: "T" } input_arg { name: "grad" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } output_arg { name: "out" type_attr: "T" is_ref: true } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "use_locking" type: "bool" default_value { b: false } } -} -- | sparseConcat :: forall v'1 v'2 v'3 t . (TensorType t) => Data.Int.Int64 -- ^ __concat_dim__ -> [Tensor v'1 Data.Int.Int64] -- ^ __indices__ -> [Tensor v'2 t] -- ^ __values__ -> [Tensor v'3 Data.Int.Int64] -- ^ __shapes__ -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__output_indices__, __output_values__, __output_shape__) -- -- * __output_indices__ -- -- * __output_values__ -- -- * __output_shape__ sparseConcat = sparseConcat' id sparseConcat' :: forall v'1 v'2 v'3 t . 
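{- Attribute note (hand-written). Every op in this sparseApply* family declares a "use_locking" bool attribute (default false) that has no positional argument; like all defaulted attributes in this module, it is set through the primed variant's OpParams parameter:

import Lens.Family2 ((.~))

  out <- sparseApplyRMSProp' (opAttr "use_locking" .~ True)
                             var ms mom lr rho momentum epsilon grad ixs
-}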
(TensorType t) => OpParams -> Data.Int.Int64 -- ^ __concat_dim__ -> [Tensor v'1 Data.Int.Int64] -- ^ __indices__ -> [Tensor v'2 t] -- ^ __values__ -> [Tensor v'3 Data.Int.Int64] -- ^ __shapes__ -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__output_indices__, __output_values__, __output_shape__) -- -- * __output_indices__ -- -- * __output_values__ -- -- * __output_shape__ sparseConcat' op'options concat_dim indices values shapes | eqLengthGuard [("N", [("indices", length indices), ("values", length values), ("shapes", length shapes)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices, buildInputs values, buildInputs shapes] return (opDef "SparseConcat" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "concat_dim" .~ concat_dim & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length indices) :: Int64 {- input_arg { name: "indices" type: DT_INT64 number_attr: "N" } input_arg { name: "values" type_attr: "T" number_attr: "N" } input_arg { name: "shapes" type: DT_INT64 number_attr: "N" } output_arg { name: "output_indices" type: DT_INT64 } output_arg { name: "output_values" type_attr: "T" } output_arg { name: "output_shape" type: DT_INT64 } attr { name: "concat_dim" type: "int" } attr { name: "N" type: "int" has_minimum: true minimum: 2 } attr { name: "T" type: "type" } -} -- | sparseConditionalAccumulator :: forall m' . (MonadBuild m') => DataType -- ^ __dtype__ -> Shape -- ^ __shape__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__ sparseConditionalAccumulator = sparseConditionalAccumulator' id sparseConditionalAccumulator' :: forall m' . (MonadBuild m') => OpParams -> DataType -- ^ __dtype__ -> Shape -- ^ __shape__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__ sparseConditionalAccumulator' op'options dtype shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "SparseConditionalAccumulator" & opAttr "dtype" .~ dtype & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" type: DT_STRING is_ref: true } attr { name: "dtype" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "shape" type: "shape" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } attr { name: "reduction_type" type: "string" default_value { s: "MEAN" } allowed_values { list { s: "MEAN" s: "SUM" } } } -} -- | sparseCross :: forall v'1 v'2 v'3 v'4 sparse_types dense_types out_type . 
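{- Usage sketch (hand-written). 'sparseConcat' concatenates N >= 2 SparseTensors along concat_dim; concat_dim and N are graph attributes, which is why concat_dim is passed as a plain Data.Int.Int64 rather than as a tensor. Concatenating two 1-D sparse vectors of lengths 3 and 2:

  let ixA = TF.constant (TF.Shape [1, 1]) [1 :: Int64]   -- entry at position 1
      ixB = TF.constant (TF.Shape [1, 1]) [0 :: Int64]   -- entry at position 0
      vA  = TF.constant (TF.Shape [1]) [10 :: Float]
      vB  = TF.constant (TF.Shape [1]) [20 :: Float]
      shA = TF.constant (TF.Shape [1]) [3 :: Int64]
      shB = TF.constant (TF.Shape [1]) [2 :: Int64]
      (outIx, outV, outSh) = sparseConcat 0 [ixA, ixB] [vA, vB] [shA, shB]
  -- outSh is [5]; B's entry lands at position 3 + 0 = 3.
-}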
(OneOfs '[Data.ByteString.ByteString, Data.Int.Int64] sparse_types, OneOfs '[Data.ByteString.ByteString, Data.Int.Int64] dense_types, OneOf '[Data.ByteString.ByteString, Data.Int.Int64] out_type) => Data.Int.Int64 -- ^ __hash_key__ -> Bool -- ^ __hashed_output__ -> DataType -- ^ __internal_type__ -> Data.Int.Int64 -- ^ __num_buckets__ -> [Tensor v'1 Data.Int.Int64] -- ^ __indices__ -> TensorList (v'2) sparse_types -- ^ __values__ -> [Tensor v'3 Data.Int.Int64] -- ^ __shapes__ -> TensorList (v'4) dense_types -- ^ __dense_inputs__ -> (Tensor Build Data.Int.Int64, Tensor Build out_type, Tensor Build Data.Int.Int64) -- ^ (__output_indices__, __output_values__, __output_shape__) -- -- * __output_indices__ -- -- * __output_values__ -- -- * __output_shape__ sparseCross = sparseCross' id sparseCross' :: forall v'1 v'2 v'3 v'4 sparse_types dense_types out_type . (OneOfs '[Data.ByteString.ByteString, Data.Int.Int64] sparse_types, OneOfs '[Data.ByteString.ByteString, Data.Int.Int64] dense_types, OneOf '[Data.ByteString.ByteString, Data.Int.Int64] out_type) => OpParams -> Data.Int.Int64 -- ^ __hash_key__ -> Bool -- ^ __hashed_output__ -> DataType -- ^ __internal_type__ -> Data.Int.Int64 -- ^ __num_buckets__ -> [Tensor v'1 Data.Int.Int64] -- ^ __indices__ -> TensorList (v'2) sparse_types -- ^ __values__ -> [Tensor v'3 Data.Int.Int64] -- ^ __shapes__ -> TensorList (v'4) dense_types -- ^ __dense_inputs__ -> (Tensor Build Data.Int.Int64, Tensor Build out_type, Tensor Build Data.Int.Int64) -- ^ (__output_indices__, __output_values__, __output_shape__) -- -- * __output_indices__ -- -- * __output_values__ -- -- * __output_shape__ sparseCross' op'options hash_key hashed_output internal_type num_buckets indices values shapes dense_inputs | eqLengthGuard [("N", [("indices", length indices), ("shapes", length shapes)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices, buildInputs values, buildInputs shapes, buildInputs dense_inputs] return (opDef "SparseCross" & opAttr "sparse_types" .~ fromTensorTypes (Proxy :: Proxy sparse_types) & opAttr "dense_types" .~ fromTensorTypes (Proxy :: Proxy dense_types) & opAttr "out_type" .~ tensorType (undefined :: out_type) & opAttr "hash_key" .~ hash_key & opAttr "hashed_output" .~ hashed_output & opAttr "internal_type" .~ internal_type & opAttr "num_buckets" .~ num_buckets & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length indices) :: Int64 {- input_arg { name: "indices" type: DT_INT64 number_attr: "N" } input_arg { name: "values" type_list_attr: "sparse_types" } input_arg { name: "shapes" type: DT_INT64 number_attr: "N" } input_arg { name: "dense_inputs" type_list_attr: "dense_types" } output_arg { name: "output_indices" type: DT_INT64 } output_arg { name: "output_values" type_attr: "out_type" } output_arg { name: "output_shape" type: DT_INT64 } attr { name: "N" type: "int" has_minimum: true } attr { name: "hashed_output" type: "bool" } attr { name: "num_buckets" type: "int" has_minimum: true } attr { name: "hash_key" type: "int" } attr { name: "sparse_types" type: "list(type)" has_minimum: true allowed_values { list { type: DT_INT64 type: DT_STRING } } } attr { name: "dense_types" type: "list(type)" has_minimum: true allowed_values { list { type: DT_INT64 type: DT_STRING } } } attr { name: "out_type" type: "type" allowed_values { list { type: DT_INT64 type: DT_STRING } } } attr { name: "internal_type" type: "type" allowed_values { list { type: DT_INT64 type: DT_STRING } } } -} -- | 
sparseDenseCwiseAdd :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 Data.Int.Int64 -- ^ __sp_indices__ -> Tensor v'2 t -- ^ __sp_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __sp_shape__ -> Tensor v'4 t -- ^ __dense__ -> Tensor Build t -- ^ __output__ sparseDenseCwiseAdd = sparseDenseCwiseAdd' id sparseDenseCwiseAdd' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __sp_indices__ -> Tensor v'2 t -- ^ __sp_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __sp_shape__ -> Tensor v'4 t -- ^ __dense__ -> Tensor Build t -- ^ __output__ sparseDenseCwiseAdd' op'options sp_indices sp_values sp_shape dense | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sp_indices, buildInputs sp_values, buildInputs sp_shape, buildInputs dense] return (opDef "SparseDenseCwiseAdd" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "sp_indices" type: DT_INT64 } input_arg { name: "sp_values" type_attr: "T" } input_arg { name: "sp_shape" type: DT_INT64 } input_arg { name: "dense" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | sparseDenseCwiseDiv :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 Data.Int.Int64 -- ^ __sp_indices__ -> Tensor v'2 t -- ^ __sp_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __sp_shape__ -> Tensor v'4 t -- ^ __dense__ -> Tensor Build t -- ^ __output__ sparseDenseCwiseDiv = sparseDenseCwiseDiv' id sparseDenseCwiseDiv' :: forall v'1 v'2 v'3 v'4 t . 
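{- Usage sketch (hand-written). 'sparseDenseCwiseAdd' adds the dense operand's entries to a SparseTensor only at the sparse tensor's stored positions; the result is just the new values vector, which the caller re-pairs with the original sp_indices and sp_shape:

  let newVals = sparseDenseCwiseAdd spIdx spVals spShape dense
  -- (spIdx, newVals, spShape) is the resulting SparseTensor; entries of
  -- 'dense' at positions absent from spIdx are ignored, not added.
-}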
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __sp_indices__ -> Tensor v'2 t -- ^ __sp_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __sp_shape__ -> Tensor v'4 t -- ^ __dense__ -> Tensor Build t -- ^ __output__ sparseDenseCwiseDiv' op'options sp_indices sp_values sp_shape dense | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sp_indices, buildInputs sp_values, buildInputs sp_shape, buildInputs dense] return (opDef "SparseDenseCwiseDiv" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "sp_indices" type: DT_INT64 } input_arg { name: "sp_values" type_attr: "T" } input_arg { name: "sp_shape" type: DT_INT64 } input_arg { name: "dense" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | sparseDenseCwiseMul :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 Data.Int.Int64 -- ^ __sp_indices__ -> Tensor v'2 t -- ^ __sp_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __sp_shape__ -> Tensor v'4 t -- ^ __dense__ -> Tensor Build t -- ^ __output__ sparseDenseCwiseMul = sparseDenseCwiseMul' id sparseDenseCwiseMul' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __sp_indices__ -> Tensor v'2 t -- ^ __sp_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __sp_shape__ -> Tensor v'4 t -- ^ __dense__ -> Tensor Build t -- ^ __output__ sparseDenseCwiseMul' op'options sp_indices sp_values sp_shape dense | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sp_indices, buildInputs sp_values, buildInputs sp_shape, buildInputs dense] return (opDef "SparseDenseCwiseMul" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "sp_indices" type: DT_INT64 } input_arg { name: "sp_values" type_attr: "T" } input_arg { name: "sp_shape" type: DT_INT64 } input_arg { name: "dense" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | sparseFillEmptyRows :: forall v'1 v'2 v'3 v'4 t . 
(TensorType t) => Tensor v'1 Data.Int.Int64 -- ^ __indices__ -> Tensor v'2 t -- ^ __values__ -> Tensor v'3 Data.Int.Int64 -- ^ __dense_shape__ -> Tensor v'4 t -- ^ __default_value__ -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Bool, Tensor Build Data.Int.Int64) -- ^ (__output_indices__, __output_values__, __empty_row_indicator__, __reverse_index_map__) -- -- * __output_indices__ -- -- * __output_values__ -- -- * __empty_row_indicator__ -- -- * __reverse_index_map__ sparseFillEmptyRows = sparseFillEmptyRows' id sparseFillEmptyRows' :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __indices__ -> Tensor v'2 t -- ^ __values__ -> Tensor v'3 Data.Int.Int64 -- ^ __dense_shape__ -> Tensor v'4 t -- ^ __default_value__ -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Bool, Tensor Build Data.Int.Int64) -- ^ (__output_indices__, __output_values__, __empty_row_indicator__, __reverse_index_map__) -- -- * __output_indices__ -- -- * __output_values__ -- -- * __empty_row_indicator__ -- -- * __reverse_index_map__ sparseFillEmptyRows' op'options indices values dense_shape default_value | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices, buildInputs values, buildInputs dense_shape, buildInputs default_value] return (opDef "SparseFillEmptyRows" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "indices" type: DT_INT64 } input_arg { name: "values" type_attr: "T" } input_arg { name: "dense_shape" type: DT_INT64 } input_arg { name: "default_value" type_attr: "T" } output_arg { name: "output_indices" type: DT_INT64 } output_arg { name: "output_values" type_attr: "T" } output_arg { name: "empty_row_indicator" type: DT_BOOL } output_arg { name: "reverse_index_map" type: DT_INT64 } attr { name: "T" type: "type" } -} -- | sparseFillEmptyRowsGrad :: forall v'1 v'2 t . (TensorType t) => Tensor v'1 Data.Int.Int64 -- ^ __reverse_index_map__ -> Tensor v'2 t -- ^ __grad_values__ -> (Tensor Build t, Tensor Build t) -- ^ (__d_values__, __d_default_value__) -- -- * __d_values__ -- -- * __d_default_value__ sparseFillEmptyRowsGrad = sparseFillEmptyRowsGrad' id sparseFillEmptyRowsGrad' :: forall v'1 v'2 t . (TensorType t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __reverse_index_map__ -> Tensor v'2 t -- ^ __grad_values__ -> (Tensor Build t, Tensor Build t) -- ^ (__d_values__, __d_default_value__) -- -- * __d_values__ -- -- * __d_default_value__ sparseFillEmptyRowsGrad' op'options reverse_index_map grad_values | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs reverse_index_map, buildInputs grad_values] return (opDef "SparseFillEmptyRowsGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "reverse_index_map" type: DT_INT64 } input_arg { name: "grad_values" type_attr: "T" } output_arg { name: "d_values" type_attr: "T" } output_arg { name: "d_default_value" type_attr: "T" } attr { name: "T" type: "type" } -} -- | sparseMatMul :: forall v'1 v'2 ta tb . (OneOf '[Data.Word.Word16, Float] ta, OneOf '[Data.Word.Word16, Float] tb) => Tensor v'1 ta -- ^ __a__ -> Tensor v'2 tb -- ^ __b__ -> Tensor Build Float -- ^ __product__ sparseMatMul = sparseMatMul' id sparseMatMul' :: forall v'1 v'2 ta tb . 
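{- Usage sketch (hand-written). 'sparseFillEmptyRows' inserts default_value at column 0 of every row of a rank-2 SparseTensor that has no entries, and reports which rows were filled:

  let (outIdx, outVals, emptyRows, revMap) =
        sparseFillEmptyRows idx vals denseShape (TF.scalar (0 :: Float))
  -- emptyRows :: Tensor Build Bool, one flag per dense row;
  -- revMap maps each input value to its position in outVals and is
  -- what 'sparseFillEmptyRowsGrad' consumes on the backward pass.
-}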
(OneOf '[Data.Word.Word16, Float] ta, OneOf '[Data.Word.Word16, Float] tb) => OpParams -> Tensor v'1 ta -- ^ __a__ -> Tensor v'2 tb -- ^ __b__ -> Tensor Build Float -- ^ __product__ sparseMatMul' op'options a b | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a, buildInputs b] return (opDef "SparseMatMul" & opAttr "Ta" .~ tensorType (undefined :: ta) & opAttr "Tb" .~ tensorType (undefined :: tb) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a" type_attr: "Ta" } input_arg { name: "b" type_attr: "Tb" } output_arg { name: "product" type: DT_FLOAT } attr { name: "transpose_a" type: "bool" default_value { b: false } } attr { name: "transpose_b" type: "bool" default_value { b: false } } attr { name: "a_is_sparse" type: "bool" default_value { b: false } } attr { name: "b_is_sparse" type: "bool" default_value { b: false } } attr { name: "Ta" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_BFLOAT16 } } } attr { name: "Tb" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_BFLOAT16 } } } -} -- | sparseReduceMax :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 Data.Int.Int64 -- ^ __input_indices__ -> Tensor v'2 t -- ^ __input_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__ -> Tensor v'4 Data.Int.Int32 -- ^ __reduction_axes__ -> Tensor Build t -- ^ __output__ sparseReduceMax = sparseReduceMax' id sparseReduceMax' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __input_indices__ -> Tensor v'2 t -- ^ __input_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__ -> Tensor v'4 Data.Int.Int32 -- ^ __reduction_axes__ -> Tensor Build t -- ^ __output__ sparseReduceMax' op'options input_indices input_values input_shape reduction_axes | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_indices, buildInputs input_values, buildInputs input_shape, buildInputs reduction_axes] return (opDef "SparseReduceMax" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_indices" type: DT_INT64 } input_arg { name: "input_values" type_attr: "T" } input_arg { name: "input_shape" type: DT_INT64 } input_arg { name: "reduction_axes" type: DT_INT32 } output_arg { name: "output" type_attr: "T" } attr { name: "keep_dims" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | sparseReduceMaxSparse :: forall v'1 v'2 v'3 v'4 t . 
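{- Usage sketch (hand-written). Despite the name, 'sparseMatMul' multiplies two *dense* tensors; the sparsity flags are only hints that let the kernel pick a faster path. The hints and the transpose flags are defaulted attributes, so they go through the primed variant's OpParams:

import Lens.Family2 ((.~))

  let a = TF.constant (TF.Shape [2, 3]) [1, 0, 0, 0, 0, 2 :: Float]
      b = TF.constant (TF.Shape [3, 2]) [1, 0, 0, 1, 1, 0 :: Float]
      p = sparseMatMul' (opAttr "a_is_sparse" .~ True) a b
  -- p :: Tensor Build Float of shape [2, 2]
-}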
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 Data.Int.Int64 -- ^ __input_indices__ -> Tensor v'2 t -- ^ __input_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__ -> Tensor v'4 Data.Int.Int32 -- ^ __reduction_axes__ -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__output_indices__, __output_values__, __output_shape__) -- -- * __output_indices__ -- -- * __output_values__ -- -- * __output_shape__ sparseReduceMaxSparse = sparseReduceMaxSparse' id sparseReduceMaxSparse' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __input_indices__ -> Tensor v'2 t -- ^ __input_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__ -> Tensor v'4 Data.Int.Int32 -- ^ __reduction_axes__ -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__output_indices__, __output_values__, __output_shape__) -- -- * __output_indices__ -- -- * __output_values__ -- -- * __output_shape__ sparseReduceMaxSparse' op'options input_indices input_values input_shape reduction_axes | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_indices, buildInputs input_values, buildInputs input_shape, buildInputs reduction_axes] return (opDef "SparseReduceMaxSparse" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_indices" type: DT_INT64 } input_arg { name: "input_values" type_attr: "T" } input_arg { name: "input_shape" type: DT_INT64 } input_arg { name: "reduction_axes" type: DT_INT32 } output_arg { name: "output_indices" type: DT_INT64 } output_arg { name: "output_values" type_attr: "T" } output_arg { name: "output_shape" type: DT_INT64 } attr { name: "keep_dims" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | sparseReduceSum :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 Data.Int.Int64 -- ^ __input_indices__ -> Tensor v'2 t -- ^ __input_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__ -> Tensor v'4 Data.Int.Int32 -- ^ __reduction_axes__ -> Tensor Build t -- ^ __output__ sparseReduceSum = sparseReduceSum' id sparseReduceSum' :: forall v'1 v'2 v'3 v'4 t . 
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __input_indices__ -> Tensor v'2 t -- ^ __input_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__ -> Tensor v'4 Data.Int.Int32 -- ^ __reduction_axes__ -> Tensor Build t -- ^ __output__ sparseReduceSum' op'options input_indices input_values input_shape reduction_axes | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_indices, buildInputs input_values, buildInputs input_shape, buildInputs reduction_axes] return (opDef "SparseReduceSum" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_indices" type: DT_INT64 } input_arg { name: "input_values" type_attr: "T" } input_arg { name: "input_shape" type: DT_INT64 } input_arg { name: "reduction_axes" type: DT_INT32 } output_arg { name: "output" type_attr: "T" } attr { name: "keep_dims" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | sparseReduceSumSparse :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 Data.Int.Int64 -- ^ __input_indices__ -> Tensor v'2 t -- ^ __input_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__ -> Tensor v'4 Data.Int.Int32 -- ^ __reduction_axes__ -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__output_indices__, __output_values__, __output_shape__) -- -- * __output_indices__ -- -- * __output_values__ -- -- * __output_shape__ sparseReduceSumSparse = sparseReduceSumSparse' id sparseReduceSumSparse' :: forall v'1 v'2 v'3 v'4 t . 
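{- Usage sketch (hand-written). 'sparseReduceSum' behaves like a dense reduce_sum applied to the dense equivalent of the sparse input and returns a *dense* tensor; keep_dims is an attribute defaulting to false:

  let axes   = TF.constant (TF.Shape [1]) [0 :: Int32]
      dense0 = sparseReduceSum idx vals shape axes
      keep   = sparseReduceSum' (opAttr "keep_dims" .~ True) idx vals shape axes
  -- dense0 drops the reduced axis; keep retains it with size 1.
-}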
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __input_indices__ -> Tensor v'2 t -- ^ __input_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__ -> Tensor v'4 Data.Int.Int32 -- ^ __reduction_axes__ -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__output_indices__, __output_values__, __output_shape__) -- -- * __output_indices__ -- -- * __output_values__ -- -- * __output_shape__ sparseReduceSumSparse' op'options input_indices input_values input_shape reduction_axes | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_indices, buildInputs input_values, buildInputs input_shape, buildInputs reduction_axes] return (opDef "SparseReduceSumSparse" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_indices" type: DT_INT64 } input_arg { name: "input_values" type_attr: "T" } input_arg { name: "input_shape" type: DT_INT64 } input_arg { name: "reduction_axes" type: DT_INT32 } output_arg { name: "output_indices" type: DT_INT64 } output_arg { name: "output_values" type_attr: "T" } output_arg { name: "output_shape" type: DT_INT64 } attr { name: "keep_dims" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | sparseReorder :: forall v'1 v'2 v'3 t . (TensorType t) => Tensor v'1 Data.Int.Int64 -- ^ __input_indices__ -> Tensor v'2 t -- ^ __input_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__ -> (Tensor Build Data.Int.Int64, Tensor Build t) -- ^ (__output_indices__, __output_values__) -- -- * __output_indices__ -- -- * __output_values__ sparseReorder = sparseReorder' id sparseReorder' :: forall v'1 v'2 v'3 t . 
(TensorType t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __input_indices__ -> Tensor v'2 t -- ^ __input_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __input_shape__ -> (Tensor Build Data.Int.Int64, Tensor Build t) -- ^ (__output_indices__, __output_values__) -- -- * __output_indices__ -- -- * __output_values__ sparseReorder' op'options input_indices input_values input_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_indices, buildInputs input_values, buildInputs input_shape] return (opDef "SparseReorder" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_indices" type: DT_INT64 } input_arg { name: "input_values" type_attr: "T" } input_arg { name: "input_shape" type: DT_INT64 } output_arg { name: "output_indices" type: DT_INT64 } output_arg { name: "output_values" type_attr: "T" } attr { name: "T" type: "type" } -} -- | sparseReshape :: Tensor v'1 Data.Int.Int64 -- ^ __input_indices__ -> Tensor v'2 Data.Int.Int64 -- ^ __input_shape__ -> Tensor v'3 Data.Int.Int64 -- ^ __new_shape__ -> (Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int64) -- ^ (__output_indices__, __output_shape__) -- -- * __output_indices__ -- -- * __output_shape__ sparseReshape = sparseReshape' id sparseReshape' :: OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __input_indices__ -> Tensor v'2 Data.Int.Int64 -- ^ __input_shape__ -> Tensor v'3 Data.Int.Int64 -- ^ __new_shape__ -> (Tensor Build Data.Int.Int64, Tensor Build Data.Int.Int64) -- ^ (__output_indices__, __output_shape__) -- -- * __output_indices__ -- -- * __output_shape__ sparseReshape' op'options input_indices input_shape new_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_indices, buildInputs input_shape, buildInputs new_shape] return (opDef "SparseReshape" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_indices" type: DT_INT64 } input_arg { name: "input_shape" type: DT_INT64 } input_arg { name: "new_shape" type: DT_INT64 } output_arg { name: "output_indices" type: DT_INT64 } output_arg { name: "output_shape" type: DT_INT64 } -} -- | sparseSegmentMean :: forall v'1 v'2 v'3 t tidx . (OneOf '[Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tidx -- ^ __indices__ -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__ -> Tensor Build t -- ^ __output__ sparseSegmentMean = sparseSegmentMean' id sparseSegmentMean' :: forall v'1 v'2 v'3 t tidx . 
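{- Usage sketch (hand-written). 'sparseReshape' is one of the few untyped ops here: it rewrites only the indices and shape, so it has no values input and the values vector is reused unchanged. As with dense reshape, one component of new_shape may be -1 and is inferred:

  let newShape = TF.constant (TF.Shape [2]) [3, -1 :: Int64]
      (outIdx, outShape) = sparseReshape inIdx inShape newShape
  -- for an input shape of [2, 6], outShape evaluates to [3, 4].
-}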
(OneOf '[Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tidx -- ^ __indices__ -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__ -> Tensor Build t -- ^ __output__ sparseSegmentMean' op'options data' indices segment_ids | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs indices, buildInputs segment_ids] return (opDef "SparseSegmentMean" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "indices" type_attr: "Tidx" } input_arg { name: "segment_ids" type: DT_INT32 } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | sparseSegmentMeanGrad :: forall v'1 v'2 v'3 v'4 t tidx . (OneOf '[Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __grad__ -> Tensor v'2 tidx -- ^ __indices__ -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__ -> Tensor v'4 Data.Int.Int32 -- ^ __output_dim0__ -> Tensor Build t -- ^ __output__ sparseSegmentMeanGrad = sparseSegmentMeanGrad' id sparseSegmentMeanGrad' :: forall v'1 v'2 v'3 v'4 t tidx . (OneOf '[Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __grad__ -> Tensor v'2 tidx -- ^ __indices__ -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__ -> Tensor v'4 Data.Int.Int32 -- ^ __output_dim0__ -> Tensor Build t -- ^ __output__ sparseSegmentMeanGrad' op'options grad indices segment_ids output_dim0 | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs grad, buildInputs indices, buildInputs segment_ids, buildInputs output_dim0] return (opDef "SparseSegmentMeanGrad" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "grad" type_attr: "T" } input_arg { name: "indices" type_attr: "Tidx" } input_arg { name: "segment_ids" type: DT_INT32 } input_arg { name: "output_dim0" type: DT_INT32 } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | sparseSegmentMeanWithNumSegments :: forall v'1 v'2 v'3 v'4 t tidx tnumsegments . (OneOf '[Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx, OneOf '[Data.Int.Int32, Data.Int.Int64] tnumsegments) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tidx -- ^ __indices__ -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__ -> Tensor v'4 tnumsegments -- ^ __num_segments__ -> Tensor Build t -- ^ __output__ sparseSegmentMeanWithNumSegments = sparseSegmentMeanWithNumSegments' id sparseSegmentMeanWithNumSegments' :: forall v'1 v'2 v'3 v'4 t tidx tnumsegments . 
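{- Worked example (hand-written). 'sparseSegmentMean' first gathers rows of data by indices, then averages the gathered rows that share a segment id (segment_ids must be sorted and is always Int32):

  let dat  = TF.constant (TF.Shape [3, 2]) [1, 2, 3, 4, 5, 6 :: Float]
      ixs  = TF.constant (TF.Shape [2]) [0, 2 :: Int32]   -- take rows 0 and 2
      segs = TF.constant (TF.Shape [2]) [0, 0 :: Int32]   -- both into segment 0
      out  = sparseSegmentMean dat ixs segs
  -- out is [[3, 4]]: the mean of rows [1,2] and [5,6].
-}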
(OneOf '[Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx, OneOf '[Data.Int.Int32, Data.Int.Int64] tnumsegments) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tidx -- ^ __indices__ -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__ -> Tensor v'4 tnumsegments -- ^ __num_segments__ -> Tensor Build t -- ^ __output__ sparseSegmentMeanWithNumSegments' op'options data' indices segment_ids num_segments | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs indices, buildInputs segment_ids, buildInputs num_segments] return (opDef "SparseSegmentMeanWithNumSegments" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & opAttr "Tnumsegments" .~ tensorType (undefined :: tnumsegments) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "indices" type_attr: "Tidx" } input_arg { name: "segment_ids" type: DT_INT32 } input_arg { name: "num_segments" type_attr: "Tnumsegments" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "Tnumsegments" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | sparseSegmentSqrtN :: forall v'1 v'2 v'3 t tidx . (OneOf '[Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tidx -- ^ __indices__ -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__ -> Tensor Build t -- ^ __output__ sparseSegmentSqrtN = sparseSegmentSqrtN' id sparseSegmentSqrtN' :: forall v'1 v'2 v'3 t tidx . (OneOf '[Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tidx -- ^ __indices__ -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__ -> Tensor Build t -- ^ __output__ sparseSegmentSqrtN' op'options data' indices segment_ids | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs indices, buildInputs segment_ids] return (opDef "SparseSegmentSqrtN" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "indices" type_attr: "Tidx" } input_arg { name: "segment_ids" type: DT_INT32 } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | sparseSegmentSqrtNGrad :: forall v'1 v'2 v'3 v'4 t tidx . (OneOf '[Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __grad__ -> Tensor v'2 tidx -- ^ __indices__ -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__ -> Tensor v'4 Data.Int.Int32 -- ^ __output_dim0__ -> Tensor Build t -- ^ __output__ sparseSegmentSqrtNGrad = sparseSegmentSqrtNGrad' id sparseSegmentSqrtNGrad' :: forall v'1 v'2 v'3 v'4 t tidx . 
(OneOf '[Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __grad__ -> Tensor v'2 tidx -- ^ __indices__ -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__ -> Tensor v'4 Data.Int.Int32 -- ^ __output_dim0__ -> Tensor Build t -- ^ __output__ sparseSegmentSqrtNGrad' op'options grad indices segment_ids output_dim0 | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs grad, buildInputs indices, buildInputs segment_ids, buildInputs output_dim0] return (opDef "SparseSegmentSqrtNGrad" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "grad" type_attr: "T" } input_arg { name: "indices" type_attr: "Tidx" } input_arg { name: "segment_ids" type: DT_INT32 } input_arg { name: "output_dim0" type: DT_INT32 } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | sparseSegmentSqrtNWithNumSegments :: forall v'1 v'2 v'3 v'4 t tidx tnumsegments . (OneOf '[Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx, OneOf '[Data.Int.Int32, Data.Int.Int64] tnumsegments) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tidx -- ^ __indices__ -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__ -> Tensor v'4 tnumsegments -- ^ __num_segments__ -> Tensor Build t -- ^ __output__ sparseSegmentSqrtNWithNumSegments = sparseSegmentSqrtNWithNumSegments' id sparseSegmentSqrtNWithNumSegments' :: forall v'1 v'2 v'3 v'4 t tidx tnumsegments . (OneOf '[Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx, OneOf '[Data.Int.Int32, Data.Int.Int64] tnumsegments) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tidx -- ^ __indices__ -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__ -> Tensor v'4 tnumsegments -- ^ __num_segments__ -> Tensor Build t -- ^ __output__ sparseSegmentSqrtNWithNumSegments' op'options data' indices segment_ids num_segments | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs indices, buildInputs segment_ids, buildInputs num_segments] return (opDef "SparseSegmentSqrtNWithNumSegments" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & opAttr "Tnumsegments" .~ tensorType (undefined :: tnumsegments) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "indices" type_attr: "Tidx" } input_arg { name: "segment_ids" type: DT_INT32 } input_arg { name: "num_segments" type_attr: "Tnumsegments" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "Tnumsegments" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | sparseSegmentSum :: forall v'1 v'2 v'3 t tidx . 
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tidx -- ^ __indices__ -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__ -> Tensor Build t -- ^ __output__ sparseSegmentSum = sparseSegmentSum' id sparseSegmentSum' :: forall v'1 v'2 v'3 t tidx . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tidx -- ^ __indices__ -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__ -> Tensor Build t -- ^ __output__ sparseSegmentSum' op'options data' indices segment_ids | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs indices, buildInputs segment_ids] return (opDef "SparseSegmentSum" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "indices" type_attr: "Tidx" } input_arg { name: "segment_ids" type: DT_INT32 } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | sparseSegmentSumWithNumSegments :: forall v'1 v'2 v'3 v'4 t tidx tnumsegments . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx, OneOf '[Data.Int.Int32, Data.Int.Int64] tnumsegments) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tidx -- ^ __indices__ -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__ -> Tensor v'4 tnumsegments -- ^ __num_segments__ -> Tensor Build t -- ^ __output__ sparseSegmentSumWithNumSegments = sparseSegmentSumWithNumSegments' id sparseSegmentSumWithNumSegments' :: forall v'1 v'2 v'3 v'4 t tidx tnumsegments . 
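{- Usage sketch (editorial comment, not generated): 'sparseSegmentSum' is the
   additive counterpart of 'sparseSegmentMean' above; only the gathered rows
   participate, so it behaves like gather-then-segment-sum.

import Data.Int (Int32)
import qualified Data.Vector as V
import TensorFlow.GenOps.Core (sparseSegmentSum)
import qualified TensorFlow.Ops as TF (constant)
import qualified TensorFlow.Session as TF (run, runSession)
import TensorFlow.Types (Shape (..))

segmentSums :: IO (V.Vector Float)
segmentSums = TF.runSession $ do
    let xs     = TF.constant (Shape [4]) [10, 20, 30, 40 :: Float]
        idx    = TF.constant (Shape [3]) [0, 1, 3 :: Int32]
        segIds = TF.constant (Shape [3]) [0, 0, 1 :: Int32]
    TF.run (sparseSegmentSum xs idx segIds)
    -- expected: [30, 40] (rows 0+1 into segment 0, row 3 into segment 1)
-}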
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx, OneOf '[Data.Int.Int32, Data.Int.Int64] tnumsegments) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tidx -- ^ __indices__ -> Tensor v'3 Data.Int.Int32 -- ^ __segment_ids__ -> Tensor v'4 tnumsegments -- ^ __num_segments__ -> Tensor Build t -- ^ __output__ sparseSegmentSumWithNumSegments' op'options data' indices segment_ids num_segments | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs indices, buildInputs segment_ids, buildInputs num_segments] return (opDef "SparseSegmentSumWithNumSegments" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & opAttr "Tnumsegments" .~ tensorType (undefined :: tnumsegments) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "indices" type_attr: "Tidx" } input_arg { name: "segment_ids" type: DT_INT32 } input_arg { name: "num_segments" type_attr: "Tnumsegments" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "Tnumsegments" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | sparseSlice :: forall v'1 v'2 v'3 v'4 v'5 t . (TensorType t) => Tensor v'1 Data.Int.Int64 -- ^ __indices__ -> Tensor v'2 t -- ^ __values__ -> Tensor v'3 Data.Int.Int64 -- ^ __shape__ -> Tensor v'4 Data.Int.Int64 -- ^ __start__ -> Tensor v'5 Data.Int.Int64 -- ^ __size__ -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__output_indices__, __output_values__, __output_shape__) -- -- * __output_indices__ -- -- * __output_values__ -- -- * __output_shape__ sparseSlice = sparseSlice' id sparseSlice' :: forall v'1 v'2 v'3 v'4 v'5 t . 
(TensorType t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __indices__ -> Tensor v'2 t -- ^ __values__ -> Tensor v'3 Data.Int.Int64 -- ^ __shape__ -> Tensor v'4 Data.Int.Int64 -- ^ __start__ -> Tensor v'5 Data.Int.Int64 -- ^ __size__ -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__output_indices__, __output_values__, __output_shape__) -- -- * __output_indices__ -- -- * __output_values__ -- -- * __output_shape__ sparseSlice' op'options indices values shape start size | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices, buildInputs values, buildInputs shape, buildInputs start, buildInputs size] return (opDef "SparseSlice" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "indices" type: DT_INT64 } input_arg { name: "values" type_attr: "T" } input_arg { name: "shape" type: DT_INT64 } input_arg { name: "start" type: DT_INT64 } input_arg { name: "size" type: DT_INT64 } output_arg { name: "output_indices" type: DT_INT64 } output_arg { name: "output_values" type_attr: "T" } output_arg { name: "output_shape" type: DT_INT64 } attr { name: "T" type: "type" } -} -- | sparseSliceGrad :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __backprop_val_grad__ -> Tensor v'2 Data.Int.Int64 -- ^ __input_indices__ -> Tensor v'3 Data.Int.Int64 -- ^ __input_start__ -> Tensor v'4 Data.Int.Int64 -- ^ __output_indices__ -> Tensor Build t -- ^ __val_grad__ sparseSliceGrad = sparseSliceGrad' id sparseSliceGrad' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __backprop_val_grad__ -> Tensor v'2 Data.Int.Int64 -- ^ __input_indices__ -> Tensor v'3 Data.Int.Int64 -- ^ __input_start__ -> Tensor v'4 Data.Int.Int64 -- ^ __output_indices__ -> Tensor Build t -- ^ __val_grad__ sparseSliceGrad' op'options backprop_val_grad input_indices input_start output_indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs backprop_val_grad, buildInputs input_indices, buildInputs input_start, buildInputs output_indices] return (opDef "SparseSliceGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "backprop_val_grad" type_attr: "T" } input_arg { name: "input_indices" type: DT_INT64 } input_arg { name: "input_start" type: DT_INT64 } input_arg { name: "output_indices" type: DT_INT64 } output_arg { name: "val_grad" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | sparseSoftmax :: forall v'1 v'2 v'3 t . 
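{- Usage sketch (editorial comment, not generated): cutting a window out of a
   1-D sparse tensor with 'sparseSlice'.  Indices inside the window are
   re-based to the slice origin; same assumed helpers as above.

import Data.Int (Int64)
import qualified Data.Vector as V
import TensorFlow.GenOps.Core (sparseSlice)
import qualified TensorFlow.Ops as TF (constant)
import qualified TensorFlow.Session as TF (run, runSession)
import TensorFlow.Types (Shape (..))

sliceSparse :: IO (V.Vector Int64, V.Vector Float, V.Vector Int64)
sliceSparse = TF.runSession $ do
    let idx   = TF.constant (Shape [2, 1]) [1, 4 :: Int64]  -- non-zeros at 1, 4
        vals  = TF.constant (Shape [2]) [10, 20 :: Float]
        shp   = TF.constant (Shape [1]) [6 :: Int64]
        start = TF.constant (Shape [1]) [2 :: Int64]
        size  = TF.constant (Shape [1]) [4 :: Int64]
    TF.run (sparseSlice idx vals shp start size)
    -- only position 4 falls in [2, 6): index [2], value 20, shape [4]
-}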
(OneOf '[Double, Float] t) => Tensor v'1 Data.Int.Int64 -- ^ __sp_indices__ -> Tensor v'2 t -- ^ __sp_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __sp_shape__ -> Tensor Build t -- ^ __output__ sparseSoftmax = sparseSoftmax' id sparseSoftmax' :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __sp_indices__ -> Tensor v'2 t -- ^ __sp_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __sp_shape__ -> Tensor Build t -- ^ __output__ sparseSoftmax' op'options sp_indices sp_values sp_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sp_indices, buildInputs sp_values, buildInputs sp_shape] return (opDef "SparseSoftmax" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "sp_indices" type: DT_INT64 } input_arg { name: "sp_values" type_attr: "T" } input_arg { name: "sp_shape" type: DT_INT64 } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | sparseSoftmaxCrossEntropyWithLogits :: forall v'1 v'2 t tlabels . (OneOf '[Data.Word.Word16, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tlabels) => Tensor v'1 t -- ^ __features__ -> Tensor v'2 tlabels -- ^ __labels__ -> (Tensor Build t, Tensor Build t) -- ^ (__loss__, __backprop__) -- -- * __loss__ -- -- * __backprop__ sparseSoftmaxCrossEntropyWithLogits = sparseSoftmaxCrossEntropyWithLogits' id sparseSoftmaxCrossEntropyWithLogits' :: forall v'1 v'2 t tlabels . (OneOf '[Data.Word.Word16, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tlabels) => OpParams -> Tensor v'1 t -- ^ __features__ -> Tensor v'2 tlabels -- ^ __labels__ -> (Tensor Build t, Tensor Build t) -- ^ (__loss__, __backprop__) -- -- * __loss__ -- -- * __backprop__ sparseSoftmaxCrossEntropyWithLogits' op'options features labels | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs features, buildInputs labels] return (opDef "SparseSoftmaxCrossEntropyWithLogits" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tlabels" .~ tensorType (undefined :: tlabels) & op'options & opInputs .~ op'inputs) {- input_arg { name: "features" type_attr: "T" } input_arg { name: "labels" type_attr: "Tlabels" } output_arg { name: "loss" type_attr: "T" } output_arg { name: "backprop" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "Tlabels" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | sparseSparseMaximum :: forall v'1 v'2 v'3 v'4 v'5 v'6 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 Data.Int.Int64 -- ^ __a_indices__ -> Tensor v'2 t -- ^ __a_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __a_shape__ -> Tensor v'4 Data.Int.Int64 -- ^ __b_indices__ -> Tensor v'5 t -- ^ __b_values__ -> Tensor v'6 Data.Int.Int64 -- ^ __b_shape__ -> (Tensor Build Data.Int.Int64, Tensor Build t) -- ^ (__output_indices__, __output_values__) -- -- * __output_indices__ -- -- * __output_values__ sparseSparseMaximum = sparseSparseMaximum' id sparseSparseMaximum' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t . 
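{- Usage sketch (editorial comment, not generated): per-example loss from
   'sparseSoftmaxCrossEntropyWithLogits', which takes class ids rather than
   one-hot labels and also returns the gradient w.r.t. the logits.

import Data.Int (Int32)
import qualified Data.Vector as V
import TensorFlow.GenOps.Core (sparseSoftmaxCrossEntropyWithLogits)
import qualified TensorFlow.Ops as TF (constant)
import qualified TensorFlow.Session as TF (run, runSession)
import TensorFlow.Types (Shape (..))

xentLoss :: IO (V.Vector Float)
xentLoss = TF.runSession $ do
    let logits = TF.constant (Shape [2, 3]) [1, 2, 3, 1, 2, 3 :: Float]
        labels = TF.constant (Shape [2]) [2, 0 :: Int32]
    (loss, _backprop) <- TF.run (sparseSoftmaxCrossEntropyWithLogits logits labels)
    return loss
    -- roughly [0.41, 2.41]: the first row's true class has the largest logit
-}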
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __a_indices__ -> Tensor v'2 t -- ^ __a_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __a_shape__ -> Tensor v'4 Data.Int.Int64 -- ^ __b_indices__ -> Tensor v'5 t -- ^ __b_values__ -> Tensor v'6 Data.Int.Int64 -- ^ __b_shape__ -> (Tensor Build Data.Int.Int64, Tensor Build t) -- ^ (__output_indices__, __output_values__) -- -- * __output_indices__ -- -- * __output_values__ sparseSparseMaximum' op'options a_indices a_values a_shape b_indices b_values b_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a_indices, buildInputs a_values, buildInputs a_shape, buildInputs b_indices, buildInputs b_values, buildInputs b_shape] return (opDef "SparseSparseMaximum" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a_indices" type: DT_INT64 } input_arg { name: "a_values" type_attr: "T" } input_arg { name: "a_shape" type: DT_INT64 } input_arg { name: "b_indices" type: DT_INT64 } input_arg { name: "b_values" type_attr: "T" } input_arg { name: "b_shape" type: DT_INT64 } output_arg { name: "output_indices" type: DT_INT64 } output_arg { name: "output_values" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | sparseSparseMinimum :: forall v'1 v'2 v'3 v'4 v'5 v'6 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 Data.Int.Int64 -- ^ __a_indices__ -> Tensor v'2 t -- ^ __a_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __a_shape__ -> Tensor v'4 Data.Int.Int64 -- ^ __b_indices__ -> Tensor v'5 t -- ^ __b_values__ -> Tensor v'6 Data.Int.Int64 -- ^ __b_shape__ -> (Tensor Build Data.Int.Int64, Tensor Build t) -- ^ (__output_indices__, __output_values__) -- -- * __output_indices__ -- -- * __output_values__ sparseSparseMinimum = sparseSparseMinimum' id sparseSparseMinimum' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t . 
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __a_indices__ -> Tensor v'2 t -- ^ __a_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __a_shape__ -> Tensor v'4 Data.Int.Int64 -- ^ __b_indices__ -> Tensor v'5 t -- ^ __b_values__ -> Tensor v'6 Data.Int.Int64 -- ^ __b_shape__ -> (Tensor Build Data.Int.Int64, Tensor Build t) -- ^ (__output_indices__, __output_values__) -- -- * __output_indices__ -- -- * __output_values__ sparseSparseMinimum' op'options a_indices a_values a_shape b_indices b_values b_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a_indices, buildInputs a_values, buildInputs a_shape, buildInputs b_indices, buildInputs b_values, buildInputs b_shape] return (opDef "SparseSparseMinimum" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a_indices" type: DT_INT64 } input_arg { name: "a_values" type_attr: "T" } input_arg { name: "a_shape" type: DT_INT64 } input_arg { name: "b_indices" type: DT_INT64 } input_arg { name: "b_values" type_attr: "T" } input_arg { name: "b_shape" type: DT_INT64 } output_arg { name: "output_indices" type: DT_INT64 } output_arg { name: "output_values" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | sparseSplit :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => Data.Int.Int64 -- ^ __num_split__ -> Tensor v'1 Data.Int.Int64 -- ^ __split_dim__ -> Tensor v'2 Data.Int.Int64 -- ^ __indices__ -> Tensor v'3 t -- ^ __values__ -> Tensor v'4 Data.Int.Int64 -- ^ __shape__ -> ([Tensor Build Data.Int.Int64], [Tensor Build t], [Tensor Build Data.Int.Int64]) -- ^ (__output_indices__, __output_values__, __output_shape__) -- -- * __output_indices__ -- -- * __output_values__ -- -- * __output_shape__ sparseSplit = sparseSplit' id sparseSplit' :: forall v'1 v'2 v'3 v'4 t . 
(TensorType t) => OpParams -> Data.Int.Int64 -- ^ __num_split__ -> Tensor v'1 Data.Int.Int64 -- ^ __split_dim__ -> Tensor v'2 Data.Int.Int64 -- ^ __indices__ -> Tensor v'3 t -- ^ __values__ -> Tensor v'4 Data.Int.Int64 -- ^ __shape__ -> ([Tensor Build Data.Int.Int64], [Tensor Build t], [Tensor Build Data.Int.Int64]) -- ^ (__output_indices__, __output_values__, __output_shape__) -- -- * __output_indices__ -- -- * __output_values__ -- -- * __output_shape__ sparseSplit' op'options num_split split_dim indices values shape | eqLengthGuard [] = pureOp [num_split, num_split, num_split] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs split_dim, buildInputs indices, buildInputs values, buildInputs shape] return (opDef "SparseSplit" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "num_split" .~ num_split & op'options & opInputs .~ op'inputs) {- input_arg { name: "split_dim" type: DT_INT64 } input_arg { name: "indices" type: DT_INT64 } input_arg { name: "values" type_attr: "T" } input_arg { name: "shape" type: DT_INT64 } output_arg { name: "output_indices" type: DT_INT64 number_attr: "num_split" } output_arg { name: "output_values" type_attr: "T" number_attr: "num_split" } output_arg { name: "output_shape" type: DT_INT64 number_attr: "num_split" } attr { name: "num_split" type: "int" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } -} -- | sparseTensorDenseAdd :: forall v'1 v'2 v'3 v'4 t tindices . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 tindices -- ^ __a_indices__ -> Tensor v'2 t -- ^ __a_values__ -> Tensor v'3 tindices -- ^ __a_shape__ -> Tensor v'4 t -- ^ __b__ -> Tensor Build t -- ^ __output__ sparseTensorDenseAdd = sparseTensorDenseAdd' id sparseTensorDenseAdd' :: forall v'1 v'2 v'3 v'4 t tindices . 
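{- Usage sketch (editorial comment, not generated): 'sparseSplit' returns
   num_split-long lists for indices, values and shapes, so the pieces can be
   fetched individually.  'TF.scalar' is the assumed tensorflow-ops helper
   for 0-D constants.

import Data.Int (Int64)
import qualified Data.Vector as V
import TensorFlow.GenOps.Core (sparseSplit)
import qualified TensorFlow.Ops as TF (constant, scalar)
import qualified TensorFlow.Session as TF (run, runSession)
import TensorFlow.Types (Shape (..))

splitSparse :: IO (V.Vector Float, V.Vector Float)
splitSparse = TF.runSession $ do
    let (_ixs, vals, _shps) =
            sparseSplit 2 (TF.scalar (0 :: Int64))
                          (TF.constant (Shape [2, 1]) [0, 3 :: Int64])
                          (TF.constant (Shape [2]) [5, 7 :: Float])
                          (TF.constant (Shape [1]) [4 :: Int64])
    TF.run (vals !! 0, vals !! 1)
    -- halves of shape [2] each: values [5] on the left, [7] on the right
-}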
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 tindices -- ^ __a_indices__ -> Tensor v'2 t -- ^ __a_values__ -> Tensor v'3 tindices -- ^ __a_shape__ -> Tensor v'4 t -- ^ __b__ -> Tensor Build t -- ^ __output__ sparseTensorDenseAdd' op'options a_indices a_values a_shape b | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a_indices, buildInputs a_values, buildInputs a_shape, buildInputs b] return (opDef "SparseTensorDenseAdd" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a_indices" type_attr: "Tindices" } input_arg { name: "a_values" type_attr: "T" } input_arg { name: "a_shape" type_attr: "Tindices" } input_arg { name: "b" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | sparseTensorDenseMatMul :: forall v'1 v'2 v'3 v'4 t tindices . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 tindices -- ^ __a_indices__ -> Tensor v'2 t -- ^ __a_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __a_shape__ -> Tensor v'4 t -- ^ __b__ -> Tensor Build t -- ^ __product__ sparseTensorDenseMatMul = sparseTensorDenseMatMul' id sparseTensorDenseMatMul' :: forall v'1 v'2 v'3 v'4 t tindices . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 tindices -- ^ __a_indices__ -> Tensor v'2 t -- ^ __a_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __a_shape__ -> Tensor v'4 t -- ^ __b__ -> Tensor Build t -- ^ __product__ sparseTensorDenseMatMul' op'options a_indices a_values a_shape b | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a_indices, buildInputs a_values, buildInputs a_shape, buildInputs b] return (opDef "SparseTensorDenseMatMul" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a_indices" type_attr: "Tindices" } input_arg { name: "a_values" type_attr: "T" } input_arg { name: "a_shape" type: DT_INT64 } input_arg { name: "b" type_attr: "T" } output_arg { name: "product" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tindices" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "adjoint_a" type: "bool" default_value { b: false } } attr { name: "adjoint_b" type: "bool" default_value { b: false } } -} -- | sparseTensorSliceDataset :: forall v'1 v'2 v'3 tvalues m' . 
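{- Usage sketch (editorial comment, not generated): multiplying a sparse
   2x2 identity by a dense matrix with 'sparseTensorDenseMatMul'; the result
   is fetched in row-major order.

import Data.Int (Int64)
import qualified Data.Vector as V
import TensorFlow.GenOps.Core (sparseTensorDenseMatMul)
import qualified TensorFlow.Ops as TF (constant)
import qualified TensorFlow.Session as TF (run, runSession)
import TensorFlow.Types (Shape (..))

spmm :: IO (V.Vector Float)
spmm = TF.runSession $ do
    let aIdx = TF.constant (Shape [2, 2]) [0, 0, 1, 1 :: Int64]  -- diagonal
        aVal = TF.constant (Shape [2]) [1, 1 :: Float]
        aShp = TF.constant (Shape [2]) [2, 2 :: Int64]
        b    = TF.constant (Shape [2, 2]) [1, 2, 3, 4 :: Float]
    TF.run (sparseTensorDenseMatMul aIdx aVal aShp b)
    -- identity times b gives back b: [1, 2, 3, 4]
-}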
(MonadBuild m', TensorType tvalues) => Tensor v'1 Data.Int.Int64 -- ^ __indices__ -> Tensor v'2 tvalues -- ^ __values__ -> Tensor v'3 Data.Int.Int64 -- ^ __dense_shape__ -> m' (Tensor Value Variant) -- ^ __handle__ sparseTensorSliceDataset = sparseTensorSliceDataset' id sparseTensorSliceDataset' :: forall v'1 v'2 v'3 tvalues m' . (MonadBuild m', TensorType tvalues) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __indices__ -> Tensor v'2 tvalues -- ^ __values__ -> Tensor v'3 Data.Int.Int64 -- ^ __dense_shape__ -> m' (Tensor Value Variant) -- ^ __handle__ sparseTensorSliceDataset' op'options indices values dense_shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices, buildInputs values, buildInputs dense_shape] buildOp [] (opDef "SparseTensorSliceDataset" & opAttr "Tvalues" .~ tensorType (undefined :: tvalues) & op'options & opInputs .~ op'inputs) {- input_arg { name: "indices" type: DT_INT64 } input_arg { name: "values" type_attr: "Tvalues" } input_arg { name: "dense_shape" type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "Tvalues" type: "type" } -} -- | sparseToDense :: forall v'1 v'2 v'3 v'4 t tindices . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 tindices -- ^ __sparse_indices__ -> Tensor v'2 tindices -- ^ __output_shape__ -> Tensor v'3 t -- ^ __sparse_values__ -> Tensor v'4 t -- ^ __default_value__ -> Tensor Build t -- ^ __dense__ sparseToDense = sparseToDense' id sparseToDense' :: forall v'1 v'2 v'3 v'4 t tindices . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 tindices -- ^ __sparse_indices__ -> Tensor v'2 tindices -- ^ __output_shape__ -> Tensor v'3 t -- ^ __sparse_values__ -> Tensor v'4 t -- ^ __default_value__ -> Tensor Build t -- ^ __dense__ sparseToDense' op'options sparse_indices output_shape sparse_values default_value | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sparse_indices, buildInputs output_shape, buildInputs sparse_values, buildInputs default_value] return (opDef "SparseToDense" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "sparse_indices" type_attr: "Tindices" } input_arg { name: "output_shape" type_attr: "Tindices" } input_arg { name: "sparse_values" type_attr: "T" } input_arg { name: "default_value" type_attr: "T" } output_arg { name: "dense" type_attr: "T" } attr { name: "validate_indices" type: "bool" default_value { b: true } } attr { name: "T" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | sparseToSparseSetOperation :: forall v'1 v'2 v'3 v'4 v'5 v'6 t . 
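{- Usage sketch (editorial comment, not generated): scattering values into a
   dense vector with 'sparseToDense'; positions not named in sparse_indices
   take the default value.

import Data.Int (Int32)
import qualified Data.Vector as V
import TensorFlow.GenOps.Core (sparseToDense)
import qualified TensorFlow.Ops as TF (constant, scalar)
import qualified TensorFlow.Session as TF (run, runSession)
import TensorFlow.Types (Shape (..))

scatter :: IO (V.Vector Float)
scatter = TF.runSession $ do
    let ixs  = TF.constant (Shape [2]) [0, 2 :: Int32]
        shp  = TF.constant (Shape [1]) [4 :: Int32]
        vals = TF.constant (Shape [2]) [7, 9 :: Float]
    TF.run (sparseToDense ixs shp vals (TF.scalar 0))
    -- expected dense output: [7, 0, 9, 0]
-}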
(OneOf '[Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8] t) => Tensor v'1 Data.Int.Int64 -- ^ __set1_indices__ -> Tensor v'2 t -- ^ __set1_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __set1_shape__ -> Tensor v'4 Data.Int.Int64 -- ^ __set2_indices__ -> Tensor v'5 t -- ^ __set2_values__ -> Tensor v'6 Data.Int.Int64 -- ^ __set2_shape__ -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__result_indices__, __result_values__, __result_shape__) -- -- * __result_indices__ -- -- * __result_values__ -- -- * __result_shape__ sparseToSparseSetOperation = sparseToSparseSetOperation' id sparseToSparseSetOperation' :: forall v'1 v'2 v'3 v'4 v'5 v'6 t . (OneOf '[Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8] t) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __set1_indices__ -> Tensor v'2 t -- ^ __set1_values__ -> Tensor v'3 Data.Int.Int64 -- ^ __set1_shape__ -> Tensor v'4 Data.Int.Int64 -- ^ __set2_indices__ -> Tensor v'5 t -- ^ __set2_values__ -> Tensor v'6 Data.Int.Int64 -- ^ __set2_shape__ -> (Tensor Build Data.Int.Int64, Tensor Build t, Tensor Build Data.Int.Int64) -- ^ (__result_indices__, __result_values__, __result_shape__) -- -- * __result_indices__ -- -- * __result_values__ -- -- * __result_shape__ sparseToSparseSetOperation' op'options set1_indices set1_values set1_shape set2_indices set2_values set2_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs set1_indices, buildInputs set1_values, buildInputs set1_shape, buildInputs set2_indices, buildInputs set2_values, buildInputs set2_shape] return (opDef "SparseToSparseSetOperation" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "set1_indices" type: DT_INT64 } input_arg { name: "set1_values" type_attr: "T" } input_arg { name: "set1_shape" type: DT_INT64 } input_arg { name: "set2_indices" type: DT_INT64 } input_arg { name: "set2_values" type_attr: "T" } input_arg { name: "set2_shape" type: DT_INT64 } output_arg { name: "result_indices" type: DT_INT64 } output_arg { name: "result_values" type_attr: "T" } output_arg { name: "result_shape" type: DT_INT64 } attr { name: "set_operation" type: "string" } attr { name: "validate_indices" type: "bool" default_value { b: true } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_UINT8 type: DT_UINT16 type: DT_STRING } } } -} -- | split :: forall v'1 v'2 t . (TensorType t) => Data.Int.Int64 -- ^ __num_split__ -> Tensor v'1 Data.Int.Int32 -- ^ __split_dim__ -> Tensor v'2 t -- ^ __value__ -> [Tensor Build t] -- ^ __output__ split = split' id split' :: forall v'1 v'2 t . 
(TensorType t) => OpParams -> Data.Int.Int64 -- ^ __num_split__ -> Tensor v'1 Data.Int.Int32 -- ^ __split_dim__ -> Tensor v'2 t -- ^ __value__ -> [Tensor Build t] -- ^ __output__ split' op'options num_split split_dim value | eqLengthGuard [] = pureOp [num_split] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs split_dim, buildInputs value] return (opDef "Split" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "num_split" .~ num_split & op'options & opInputs .~ op'inputs) {- input_arg { name: "split_dim" type: DT_INT32 } input_arg { name: "value" type_attr: "T" } output_arg { name: "output" type_attr: "T" number_attr: "num_split" } attr { name: "num_split" type: "int" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } -} -- | splitV :: forall v'1 v'2 v'3 t tlen . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tlen) => Data.Int.Int64 -- ^ __num_split__ -> Tensor v'1 t -- ^ __value__ -> Tensor v'2 tlen -- ^ __size_splits__ -> Tensor v'3 Data.Int.Int32 -- ^ __split_dim__ -> [Tensor Build t] -- ^ __output__ splitV = splitV' id splitV' :: forall v'1 v'2 v'3 t tlen . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tlen) => OpParams -> Data.Int.Int64 -- ^ __num_split__ -> Tensor v'1 t -- ^ __value__ -> Tensor v'2 tlen -- ^ __size_splits__ -> Tensor v'3 Data.Int.Int32 -- ^ __split_dim__ -> [Tensor Build t] -- ^ __output__ splitV' op'options num_split value size_splits split_dim | eqLengthGuard [] = pureOp [num_split] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value, buildInputs size_splits, buildInputs split_dim] return (opDef "SplitV" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tlen" .~ tensorType (undefined :: tlen) & opAttr "num_split" .~ num_split & op'options & opInputs .~ op'inputs) {- input_arg { name: "value" type_attr: "T" } input_arg { name: "size_splits" type_attr: "Tlen" } input_arg { name: "split_dim" type: DT_INT32 } output_arg { name: "output" type_attr: "T" number_attr: "num_split" } attr { name: "num_split" type: "int" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } attr { name: "Tlen" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | sqrt :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ sqrt = sqrt' id sqrt' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ sqrt' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Sqrt" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | sqrtGrad :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __y__ -> Tensor v'2 t -- ^ __dy__ -> Tensor Build t -- ^ __z__ sqrtGrad = sqrtGrad' id sqrtGrad' :: forall v'1 v'2 t . 
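{- Usage sketch (editorial comment, not generated): 'split' divides a tensor
   into num_split equal pieces along split_dim and returns them as a list of
   graph tensors.

import Data.Int (Int32)
import qualified Data.Vector as V
import TensorFlow.GenOps.Core (split)
import qualified TensorFlow.Ops as TF (constant, scalar)
import qualified TensorFlow.Session as TF (run, runSession)
import TensorFlow.Types (Shape (..))

halves :: IO (V.Vector Float, V.Vector Float)
halves = TF.runSession $ do
    let xs     = TF.constant (Shape [4]) [1, 2, 3, 4 :: Float]
        pieces = split 2 (TF.scalar (0 :: Int32)) xs
    TF.run (pieces !! 0, pieces !! 1)
    -- expected: [1, 2] and [3, 4]
-}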
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __y__ -> Tensor v'2 t -- ^ __dy__ -> Tensor Build t -- ^ __z__ sqrtGrad' op'options y dy | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs y, buildInputs dy] return (opDef "SqrtGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "y" type_attr: "T" } input_arg { name: "dy" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | square :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ square = square' id square' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ square' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Square" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | squaredDifference :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ squaredDifference = squaredDifference' id squaredDifference' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ squaredDifference' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "SquaredDifference" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | squeeze :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ squeeze = squeeze' id squeeze' :: forall v'1 t . 
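{- Usage sketch (editorial comment, not generated): composing the pure math
   ops; 'Ops.sqrt' of 'Ops.squaredDifference' computes an element-wise
   absolute difference.  The qualified import avoids clashing with
   Prelude.sqrt.

import qualified Data.Vector as V
import qualified TensorFlow.GenOps.Core as Ops
import qualified TensorFlow.Ops as TF (constant)
import qualified TensorFlow.Session as TF (run, runSession)
import TensorFlow.Types (Shape (..))

absDiff :: IO (V.Vector Float)
absDiff = TF.runSession $ do
    let y    = TF.constant (Shape [3]) [1, 2, 3 :: Float]
        yHat = TF.constant (Shape [3]) [1, 4, 6 :: Float]
    TF.run (Ops.sqrt (Ops.squaredDifference y yHat))
    -- expected: [0, 2, 3]
-}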
(TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ squeeze' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Squeeze" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "squeeze_dims" type: "list(int)" default_value { list { } } has_minimum: true } -} -- | stack :: forall m' . (MonadBuild m') => DataType -- ^ __elem_type__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__ stack = stack' id stack' :: forall m' . (MonadBuild m') => OpParams -> DataType -- ^ __elem_type__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__ stack' op'options elem_type | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "Stack" & opAttr "elem_type" .~ elem_type & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" type: DT_STRING is_ref: true } attr { name: "elem_type" type: "type" } attr { name: "stack_name" type: "string" default_value { s: "" } } -} -- | stackClose :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> m' (ControlNode) stackClose = stackClose' id stackClose' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> m' (ControlNode) stackClose' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "StackClose" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } -} -- | stackCloseV2 :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __handle__ -> m' (ControlNode) stackCloseV2 = stackCloseV2' id stackCloseV2' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__ -> m' (ControlNode) stackCloseV2' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "StackCloseV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_RESOURCE } -} -- | stackPop :: forall elem_type m' . (MonadBuild m', TensorType elem_type) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> m' (Tensor Value elem_type) -- ^ __elem__ stackPop = stackPop' id stackPop' :: forall elem_type m' . (MonadBuild m', TensorType elem_type) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> m' (Tensor Value elem_type) -- ^ __elem__ stackPop' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "StackPop" & opAttr "elem_type" .~ tensorType (undefined :: elem_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } output_arg { name: "elem" type_attr: "elem_type" } attr { name: "elem_type" type: "type" } -} -- | stackPopV2 :: forall v'1 elem_type m' . (MonadBuild m', TensorType elem_type) => Tensor v'1 ResourceHandle -- ^ __handle__ -> m' (Tensor Value elem_type) -- ^ __elem__ stackPopV2 = stackPopV2' id stackPopV2' :: forall v'1 elem_type m' . 
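{- Usage sketch (editorial comment, not generated): the primed variants take
   an OpParams transformer, which is how optional attrs such as squeeze_dims
   are set.  This sketch assumes the list(int) attr is set with an [Int64],
   matching how other optional attrs are set elsewhere in this codebase.

{-# LANGUAGE OverloadedStrings #-}

import Data.Int (Int64)
import qualified Data.Vector as V
import Lens.Family2 ((.~))
import TensorFlow.Build (opAttr)
import TensorFlow.GenOps.Core (squeeze')
import qualified TensorFlow.Ops as TF (constant)
import qualified TensorFlow.Session as TF (run, runSession)
import TensorFlow.Types (Shape (..))

dropAxis1 :: IO (V.Vector Float)
dropAxis1 = TF.runSession $ do
    let x = TF.constant (Shape [2, 1, 2]) [1, 2, 3, 4 :: Float]
    TF.run (squeeze' (opAttr "squeeze_dims" .~ ([1] :: [Int64])) x)
    -- data unchanged, static shape goes from [2,1,2] to [2,2]
-}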
(MonadBuild m', TensorType elem_type) => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__ -> m' (Tensor Value elem_type) -- ^ __elem__ stackPopV2' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "StackPopV2" & opAttr "elem_type" .~ tensorType (undefined :: elem_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_RESOURCE } output_arg { name: "elem" type_attr: "elem_type" } attr { name: "elem_type" type: "type" } -} -- | stackPush :: forall v'2 t m' . (MonadBuild m', TensorType t) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 t -- ^ __elem__ -> m' (Tensor Value t) -- ^ __output__ stackPush = stackPush' id stackPush' :: forall v'2 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 t -- ^ __elem__ -> m' (Tensor Value t) -- ^ __output__ stackPush' op'options handle elem | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs elem] buildOp [] (opDef "StackPush" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "elem" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "swap_memory" type: "bool" default_value { b: false } } -} -- | stackPushV2 :: forall v'1 v'2 t m' . (MonadBuild m', TensorType t) => Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 t -- ^ __elem__ -> m' (Tensor Value t) -- ^ __output__ stackPushV2 = stackPushV2' id stackPushV2' :: forall v'1 v'2 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 t -- ^ __elem__ -> m' (Tensor Value t) -- ^ __output__ stackPushV2' op'options handle elem | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs elem] buildOp [] (opDef "StackPushV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_RESOURCE } input_arg { name: "elem" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "swap_memory" type: "bool" default_value { b: false } } -} -- | stackV2 :: forall v'1 m' . (MonadBuild m') => DataType -- ^ __elem_type__ -> Tensor v'1 Data.Int.Int32 -- ^ __max_size__ -> m' (Tensor Value ResourceHandle) -- ^ __handle__ stackV2 = stackV2' id stackV2' :: forall v'1 m' . (MonadBuild m') => OpParams -> DataType -- ^ __elem_type__ -> Tensor v'1 Data.Int.Int32 -- ^ __max_size__ -> m' (Tensor Value ResourceHandle) -- ^ __handle__ stackV2' op'options elem_type max_size | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs max_size] buildOp [] (opDef "StackV2" & opAttr "elem_type" .~ elem_type & op'options & opInputs .~ op'inputs) {- input_arg { name: "max_size" type: DT_INT32 } output_arg { name: "handle" type: DT_RESOURCE } attr { name: "elem_type" type: "type" } attr { name: "stack_name" type: "string" default_value { s: "" } } -} -- | stage :: forall v'1 dtypes m' . (MonadBuild m', TensorTypes dtypes) => TensorList (v'1) dtypes -- ^ __values__ -> m' (ControlNode) stage = stage' id stage' :: forall v'1 dtypes m' . 
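{- Usage sketch (editorial comment, not generated): the stack ops are
   stateful, so the pop has to be sequenced after the push with an explicit
   control dependency ('withControlDependencies' from TensorFlow.ControlFlow
   is an assumption of this sketch).

import Data.Int (Int32)
import qualified Data.Vector as V
import TensorFlow.ControlFlow (withControlDependencies)
import TensorFlow.GenOps.Core (stackPopV2, stackPushV2, stackV2)
import qualified TensorFlow.Ops as TF (scalar)
import qualified TensorFlow.Session as TF (run, runSession)
import TensorFlow.Tensor (Tensor, Value)
import TensorFlow.Types (DataType (DT_FLOAT))

pushPop :: IO (V.Vector Float)
pushPop = TF.runSession $ do
    h      <- stackV2 DT_FLOAT (TF.scalar (10 :: Int32))   -- max_size 10
    pushed <- stackPushV2 h (TF.scalar (42 :: Float))
    popped <- withControlDependencies pushed (stackPopV2 h)
    TF.run (popped :: Tensor Value Float)
    -- expected: [42]
-}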
(MonadBuild m', TensorTypes dtypes) => OpParams -> TensorList (v'1) dtypes -- ^ __values__ -> m' (ControlNode) stage' op'options values | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs values] buildOp [] (opDef "Stage" & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes) & op'options & opInputs .~ op'inputs) {- input_arg { name: "values" type_list_attr: "dtypes" } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | stageClear :: forall m' . (MonadBuild m') => [DataType] -- ^ __dtypes__ -> m' (ControlNode) stageClear = stageClear' id stageClear' :: forall m' . (MonadBuild m') => OpParams -> [DataType] -- ^ __dtypes__ -> m' (ControlNode) stageClear' op'options dtypes | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "StageClear" & opAttr "dtypes" .~ dtypes & op'options & opInputs .~ op'inputs) {- attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | stagePeek :: forall v'1 dtypes m' . (MonadBuild m', TensorTypes dtypes) => Tensor v'1 Data.Int.Int32 -- ^ __index__ -> m' (TensorList (Value) dtypes) -- ^ __values__ stagePeek = stagePeek' id stagePeek' :: forall v'1 dtypes m' . (MonadBuild m', TensorTypes dtypes) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __index__ -> m' (TensorList (Value) dtypes) -- ^ __values__ stagePeek' op'options index | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs index] buildOp [] (opDef "StagePeek" & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes) & op'options & opInputs .~ op'inputs) {- input_arg { name: "index" type: DT_INT32 } output_arg { name: "values" type_list_attr: "dtypes" } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | stageSize :: forall m' . (MonadBuild m') => [DataType] -- ^ __dtypes__ -> m' (Tensor Value Data.Int.Int32) -- ^ __size__ stageSize = stageSize' id stageSize' :: forall m' . 
(MonadBuild m') => OpParams -> [DataType] -- ^ __dtypes__ -> m' (Tensor Value Data.Int.Int32) -- ^ __size__ stageSize' op'options dtypes | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "StageSize" & opAttr "dtypes" .~ dtypes & op'options & opInputs .~ op'inputs) {- output_arg { name: "size" type: DT_INT32 } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | statefulRandomBinomial :: forall v'1 v'2 v'3 v'4 v'5 s t dtype m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] s, OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] dtype) => Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 Data.Int.Int64 -- ^ __algorithm__ -> Tensor v'3 s -- ^ __shape__ -> Tensor v'4 t -- ^ __counts__ -> Tensor v'5 t -- ^ __probs__ -> m' (Tensor Value dtype) -- ^ __output__ statefulRandomBinomial = statefulRandomBinomial' id statefulRandomBinomial' :: forall v'1 v'2 v'3 v'4 v'5 s t dtype m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] s, OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] dtype) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 Data.Int.Int64 -- ^ __algorithm__ -> Tensor v'3 s -- ^ __shape__ -> Tensor v'4 t -- ^ __counts__ -> Tensor v'5 t -- ^ __probs__ -> m' (Tensor Value dtype) -- ^ __output__ statefulRandomBinomial' op'options resource algorithm shape counts probs | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs algorithm, buildInputs shape, buildInputs counts, buildInputs probs] buildOp [] (opDef "StatefulRandomBinomial" & opAttr "S" .~ tensorType (undefined :: s) & opAttr "T" .~ tensorType (undefined :: t) & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } input_arg { name: "algorithm" type: DT_INT64 } input_arg { name: "shape" type_attr: "S" } input_arg { name: "counts" type_attr: "T" } input_arg { name: "probs" type_attr: "T" } output_arg { name: "output" type_attr: "dtype" } attr { name: "S" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "T" type: "type" default_value { type: DT_DOUBLE } allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } attr { name: "dtype" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } -} -- | statefulStandardNormal :: forall v'1 v'2 dtype shape_dtype m' . (MonadBuild m', TensorType dtype, TensorType shape_dtype) => Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 shape_dtype -- ^ __shape__ -> m' (Tensor Value dtype) -- ^ __output__ statefulStandardNormal = statefulStandardNormal' id statefulStandardNormal' :: forall v'1 v'2 dtype shape_dtype m' . 
(MonadBuild m', TensorType dtype, TensorType shape_dtype) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 shape_dtype -- ^ __shape__ -> m' (Tensor Value dtype) -- ^ __output__ statefulStandardNormal' op'options resource shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs shape] buildOp [] (opDef "StatefulStandardNormal" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape_dtype" .~ tensorType (undefined :: shape_dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } input_arg { name: "shape" type_attr: "shape_dtype" } output_arg { name: "output" type_attr: "dtype" } attr { name: "dtype" type: "type" default_value { type: DT_FLOAT } } attr { name: "shape_dtype" type: "type" default_value { type: DT_INT64 } } -} -- | statefulStandardNormalV2 :: forall v'1 v'2 v'3 dtype shape_dtype m' . (MonadBuild m', TensorType dtype, TensorType shape_dtype) => Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 Data.Int.Int64 -- ^ __algorithm__ -> Tensor v'3 shape_dtype -- ^ __shape__ -> m' (Tensor Value dtype) -- ^ __output__ statefulStandardNormalV2 = statefulStandardNormalV2' id statefulStandardNormalV2' :: forall v'1 v'2 v'3 dtype shape_dtype m' . (MonadBuild m', TensorType dtype, TensorType shape_dtype) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 Data.Int.Int64 -- ^ __algorithm__ -> Tensor v'3 shape_dtype -- ^ __shape__ -> m' (Tensor Value dtype) -- ^ __output__ statefulStandardNormalV2' op'options resource algorithm shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs algorithm, buildInputs shape] buildOp [] (opDef "StatefulStandardNormalV2" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape_dtype" .~ tensorType (undefined :: shape_dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } input_arg { name: "algorithm" type: DT_INT64 } input_arg { name: "shape" type_attr: "shape_dtype" } output_arg { name: "output" type_attr: "dtype" } attr { name: "dtype" type: "type" default_value { type: DT_FLOAT } } attr { name: "shape_dtype" type: "type" default_value { type: DT_INT64 } } -} -- | statefulTruncatedNormal :: forall v'1 v'2 v'3 dtype shape_dtype m' . (MonadBuild m', TensorType dtype, TensorType shape_dtype) => Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 Data.Int.Int64 -- ^ __algorithm__ -> Tensor v'3 shape_dtype -- ^ __shape__ -> m' (Tensor Value dtype) -- ^ __output__ statefulTruncatedNormal = statefulTruncatedNormal' id statefulTruncatedNormal' :: forall v'1 v'2 v'3 dtype shape_dtype m' . 
(MonadBuild m', TensorType dtype, TensorType shape_dtype) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 Data.Int.Int64 -- ^ __algorithm__ -> Tensor v'3 shape_dtype -- ^ __shape__ -> m' (Tensor Value dtype) -- ^ __output__ statefulTruncatedNormal' op'options resource algorithm shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs algorithm, buildInputs shape] buildOp [] (opDef "StatefulTruncatedNormal" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape_dtype" .~ tensorType (undefined :: shape_dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } input_arg { name: "algorithm" type: DT_INT64 } input_arg { name: "shape" type_attr: "shape_dtype" } output_arg { name: "output" type_attr: "dtype" } attr { name: "dtype" type: "type" default_value { type: DT_FLOAT } } attr { name: "shape_dtype" type: "type" default_value { type: DT_INT64 } } -} -- | statefulUniform :: forall v'1 v'2 v'3 dtype shape_dtype m' . (MonadBuild m', TensorType dtype, TensorType shape_dtype) => Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 Data.Int.Int64 -- ^ __algorithm__ -> Tensor v'3 shape_dtype -- ^ __shape__ -> m' (Tensor Value dtype) -- ^ __output__ statefulUniform = statefulUniform' id statefulUniform' :: forall v'1 v'2 v'3 dtype shape_dtype m' . (MonadBuild m', TensorType dtype, TensorType shape_dtype) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 Data.Int.Int64 -- ^ __algorithm__ -> Tensor v'3 shape_dtype -- ^ __shape__ -> m' (Tensor Value dtype) -- ^ __output__ statefulUniform' op'options resource algorithm shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs algorithm, buildInputs shape] buildOp [] (opDef "StatefulUniform" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape_dtype" .~ tensorType (undefined :: shape_dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } input_arg { name: "algorithm" type: DT_INT64 } input_arg { name: "shape" type_attr: "shape_dtype" } output_arg { name: "output" type_attr: "dtype" } attr { name: "dtype" type: "type" default_value { type: DT_FLOAT } } attr { name: "shape_dtype" type: "type" default_value { type: DT_INT64 } } -} -- | statefulUniformFullInt :: forall v'1 v'2 v'3 dtype shape_dtype m' . (MonadBuild m', TensorType dtype, TensorType shape_dtype) => Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 Data.Int.Int64 -- ^ __algorithm__ -> Tensor v'3 shape_dtype -- ^ __shape__ -> m' (Tensor Value dtype) -- ^ __output__ statefulUniformFullInt = statefulUniformFullInt' id statefulUniformFullInt' :: forall v'1 v'2 v'3 dtype shape_dtype m' . 
(MonadBuild m', TensorType dtype, TensorType shape_dtype) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 Data.Int.Int64 -- ^ __algorithm__ -> Tensor v'3 shape_dtype -- ^ __shape__ -> m' (Tensor Value dtype) -- ^ __output__ statefulUniformFullInt' op'options resource algorithm shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs algorithm, buildInputs shape] buildOp [] (opDef "StatefulUniformFullInt" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape_dtype" .~ tensorType (undefined :: shape_dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } input_arg { name: "algorithm" type: DT_INT64 } input_arg { name: "shape" type_attr: "shape_dtype" } output_arg { name: "output" type_attr: "dtype" } attr { name: "dtype" type: "type" default_value { type: DT_UINT64 } } attr { name: "shape_dtype" type: "type" default_value { type: DT_INT64 } } -} -- | statefulUniformInt :: forall v'1 v'2 v'3 v'4 v'5 dtype shape_dtype m' . (MonadBuild m', TensorType dtype, TensorType shape_dtype) => Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 Data.Int.Int64 -- ^ __algorithm__ -> Tensor v'3 shape_dtype -- ^ __shape__ -> Tensor v'4 dtype -- ^ __minval__ -> Tensor v'5 dtype -- ^ __maxval__ -> m' (Tensor Value dtype) -- ^ __output__ statefulUniformInt = statefulUniformInt' id statefulUniformInt' :: forall v'1 v'2 v'3 v'4 v'5 dtype shape_dtype m' . (MonadBuild m', TensorType dtype, TensorType shape_dtype) => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> Tensor v'2 Data.Int.Int64 -- ^ __algorithm__ -> Tensor v'3 shape_dtype -- ^ __shape__ -> Tensor v'4 dtype -- ^ __minval__ -> Tensor v'5 dtype -- ^ __maxval__ -> m' (Tensor Value dtype) -- ^ __output__ statefulUniformInt' op'options resource algorithm shape minval maxval | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource, buildInputs algorithm, buildInputs shape, buildInputs minval, buildInputs maxval] buildOp [] (opDef "StatefulUniformInt" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape_dtype" .~ tensorType (undefined :: shape_dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } input_arg { name: "algorithm" type: DT_INT64 } input_arg { name: "shape" type_attr: "shape_dtype" } input_arg { name: "minval" type_attr: "dtype" } input_arg { name: "maxval" type_attr: "dtype" } output_arg { name: "output" type_attr: "dtype" } attr { name: "dtype" type: "type" default_value { type: DT_INT64 } } attr { name: "shape_dtype" type: "type" default_value { type: DT_INT64 } } -} -- | statelessMultinomial :: forall v'1 v'2 v'3 t tseed output_dtype . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tseed, OneOf '[Data.Int.Int32, Data.Int.Int64] output_dtype) => Tensor v'1 t -- ^ __logits__ -> Tensor v'2 Data.Int.Int32 -- ^ __num_samples__ -> Tensor v'3 tseed -- ^ __seed__ -> Tensor Build output_dtype -- ^ __output__ statelessMultinomial = statelessMultinomial' id statelessMultinomial' :: forall v'1 v'2 v'3 t tseed output_dtype . 
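{- Editor's sketch (not generated code): drawing class indices with
   StatelessMultinomial above; TF.constant is TensorFlow.Ops.constant and
   the names are illustrative.

> samples :: Tensor Build Data.Int.Int64
> samples = statelessMultinomial
>     (TF.constant (TF.Shape [1, 3]) [0.1, 0.2, 0.7 :: Float])  -- logits [batch, classes]
>     (TF.constant (TF.Shape []) [5 :: Data.Int.Int32])         -- num_samples (scalar)
>     (TF.constant (TF.Shape [2]) [1, 2 :: Data.Int.Int64])     -- seed
-}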
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tseed, OneOf '[Data.Int.Int32, Data.Int.Int64] output_dtype) => OpParams -> Tensor v'1 t -- ^ __logits__ -> Tensor v'2 Data.Int.Int32 -- ^ __num_samples__ -> Tensor v'3 tseed -- ^ __seed__ -> Tensor Build output_dtype -- ^ __output__ statelessMultinomial' op'options logits num_samples seed | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs logits, buildInputs num_samples, buildInputs seed] return (opDef "StatelessMultinomial" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tseed" .~ tensorType (undefined :: tseed) & opAttr "output_dtype" .~ tensorType (undefined :: output_dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "logits" type_attr: "T" } input_arg { name: "num_samples" type: DT_INT32 } input_arg { name: "seed" type_attr: "Tseed" } output_arg { name: "output" type_attr: "output_dtype" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tseed" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "output_dtype" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | statelessRandomNormal :: forall v'1 v'2 dtype t tseed . (OneOf '[Data.Word.Word16, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tseed) => Tensor v'1 t -- ^ __shape__ -> Tensor v'2 tseed -- ^ __seed__ -> Tensor Build dtype -- ^ __output__ statelessRandomNormal = statelessRandomNormal' id statelessRandomNormal' :: forall v'1 v'2 dtype t tseed . (OneOf '[Data.Word.Word16, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tseed) => OpParams -> Tensor v'1 t -- ^ __shape__ -> Tensor v'2 tseed -- ^ __seed__ -> Tensor Build dtype -- ^ __output__ statelessRandomNormal' op'options shape seed | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape, buildInputs seed] return (opDef "StatelessRandomNormal" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tseed" .~ tensorType (undefined :: tseed) & op'options & opInputs .~ op'inputs) {- input_arg { name: "shape" type_attr: "T" } input_arg { name: "seed" type_attr: "Tseed" } output_arg { name: "output" type_attr: "dtype" } attr { name: "dtype" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "T" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "Tseed" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | statelessRandomUniform :: forall v'1 v'2 dtype t tseed . 
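{- Editor's sketch (not generated code): the stateless RNG ops are pure
   functions of (shape, seed), so rerunning with the same seed reproduces
   the same values. Assumes the usual qualified imports of TensorFlow.Core
   and TensorFlow.Ops as TF, plus Data.Vector as V.

> normals :: TF.Session (V.Vector Float)
> normals = TF.run $ statelessRandomNormal
>     (TF.constant (TF.Shape [2]) [2, 3 :: Data.Int.Int32])   -- sample shape [2,3]
>     (TF.constant (TF.Shape [2]) [42, 7 :: Data.Int.Int64])  -- seed
-}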
(OneOf '[Data.Word.Word16, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tseed) => Tensor v'1 t -- ^ __shape__ -> Tensor v'2 tseed -- ^ __seed__ -> Tensor Build dtype -- ^ __output__ statelessRandomUniform = statelessRandomUniform' id statelessRandomUniform' :: forall v'1 v'2 dtype t tseed . (OneOf '[Data.Word.Word16, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tseed) => OpParams -> Tensor v'1 t -- ^ __shape__ -> Tensor v'2 tseed -- ^ __seed__ -> Tensor Build dtype -- ^ __output__ statelessRandomUniform' op'options shape seed | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape, buildInputs seed] return (opDef "StatelessRandomUniform" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tseed" .~ tensorType (undefined :: tseed) & op'options & opInputs .~ op'inputs) {- input_arg { name: "shape" type_attr: "T" } input_arg { name: "seed" type_attr: "Tseed" } output_arg { name: "output" type_attr: "dtype" } attr { name: "dtype" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "T" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "Tseed" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | statelessRandomUniformInt :: forall v'1 v'2 v'3 v'4 dtype t tseed . (OneOf '[Data.Int.Int32, Data.Int.Int64] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tseed) => Tensor v'1 t -- ^ __shape__ -> Tensor v'2 tseed -- ^ __seed__ -> Tensor v'3 dtype -- ^ __minval__ -> Tensor v'4 dtype -- ^ __maxval__ -> Tensor Build dtype -- ^ __output__ statelessRandomUniformInt = statelessRandomUniformInt' id statelessRandomUniformInt' :: forall v'1 v'2 v'3 v'4 dtype t tseed . (OneOf '[Data.Int.Int32, Data.Int.Int64] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tseed) => OpParams -> Tensor v'1 t -- ^ __shape__ -> Tensor v'2 tseed -- ^ __seed__ -> Tensor v'3 dtype -- ^ __minval__ -> Tensor v'4 dtype -- ^ __maxval__ -> Tensor Build dtype -- ^ __output__ statelessRandomUniformInt' op'options shape seed minval maxval | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape, buildInputs seed, buildInputs minval, buildInputs maxval] return (opDef "StatelessRandomUniformInt" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tseed" .~ tensorType (undefined :: tseed) & op'options & opInputs .~ op'inputs) {- input_arg { name: "shape" type_attr: "T" } input_arg { name: "seed" type_attr: "Tseed" } input_arg { name: "minval" type_attr: "dtype" } input_arg { name: "maxval" type_attr: "dtype" } output_arg { name: "output" type_attr: "dtype" } attr { name: "dtype" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "Tseed" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | statelessTruncatedNormal :: forall v'1 v'2 dtype t tseed . 
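{- Editor's sketch (not generated code): integer draws in [minval, maxval)
   with StatelessRandomUniformInt above; assumptions as in the previous
   sketch.

> ints :: Tensor Build Data.Int.Int64
> ints = statelessRandomUniformInt
>     (TF.constant (TF.Shape [1]) [4 :: Data.Int.Int32])       -- shape: 4 draws
>     (TF.constant (TF.Shape [2]) [11, 47 :: Data.Int.Int64])  -- seed
>     (TF.constant (TF.Shape []) [0 :: Data.Int.Int64])        -- minval (inclusive)
>     (TF.constant (TF.Shape []) [10 :: Data.Int.Int64])       -- maxval (exclusive)
-}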
(OneOf '[Data.Word.Word16, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tseed) => Tensor v'1 t -- ^ __shape__ -> Tensor v'2 tseed -- ^ __seed__ -> Tensor Build dtype -- ^ __output__ statelessTruncatedNormal = statelessTruncatedNormal' id statelessTruncatedNormal' :: forall v'1 v'2 dtype t tseed . (OneOf '[Data.Word.Word16, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tseed) => OpParams -> Tensor v'1 t -- ^ __shape__ -> Tensor v'2 tseed -- ^ __seed__ -> Tensor Build dtype -- ^ __output__ statelessTruncatedNormal' op'options shape seed | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape, buildInputs seed] return (opDef "StatelessTruncatedNormal" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tseed" .~ tensorType (undefined :: tseed) & op'options & opInputs .~ op'inputs) {- input_arg { name: "shape" type_attr: "T" } input_arg { name: "seed" type_attr: "Tseed" } output_arg { name: "output" type_attr: "dtype" } attr { name: "dtype" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "T" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "Tseed" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | staticRegexFullMatch :: Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor Build Bool -- ^ __output__ staticRegexFullMatch = staticRegexFullMatch' id staticRegexFullMatch' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor Build Bool -- ^ __output__ staticRegexFullMatch' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "StaticRegexFullMatch" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_STRING } output_arg { name: "output" type: DT_BOOL } attr { name: "pattern" type: "string" } -} -- | staticRegexReplace :: Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ staticRegexReplace = staticRegexReplace' id staticRegexReplace' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ staticRegexReplace' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "StaticRegexReplace" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_STRING } output_arg { name: "output" type: DT_STRING } attr { name: "pattern" type: "string" } attr { name: "rewrite" type: "string" } attr { name: "replace_global" type: "bool" default_value { b: true } } -} -- | statsAggregatorHandleV2 :: forall m' . (MonadBuild m') => m' (Tensor Value ResourceHandle) -- ^ __handle__ statsAggregatorHandleV2 = statsAggregatorHandleV2' id statsAggregatorHandleV2' :: forall m' . 
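{- Editor's note: per the attr lists above, "pattern" (and "rewrite" for
   StaticRegexReplace) are required string attrs that these wrappers do not
   take positionally, so they must be supplied through the OpParams
   variants. A sketch (the string literal assumes OverloadedStrings; 'strs'
   is an assumed DT_STRING tensor):

> matched :: Tensor Build Bool
> matched = staticRegexFullMatch'
>     (opAttr "pattern" .~ ("[0-9]+" :: Data.ByteString.ByteString))
>     strs
-}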
(MonadBuild m') => OpParams -> m' (Tensor Value ResourceHandle) -- ^ __handle__ statsAggregatorHandleV2' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "StatsAggregatorHandleV2" & op'options & opInputs .~ op'inputs) {- output_arg { name: "handle" type: DT_RESOURCE } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | statsAggregatorSetSummaryWriter :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __stats_aggregator__ -> Tensor v'2 ResourceHandle -- ^ __summary__ -> m' (ControlNode) statsAggregatorSetSummaryWriter = statsAggregatorSetSummaryWriter' id statsAggregatorSetSummaryWriter' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __stats_aggregator__ -> Tensor v'2 ResourceHandle -- ^ __summary__ -> m' (ControlNode) statsAggregatorSetSummaryWriter' op'options stats_aggregator summary | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs stats_aggregator, buildInputs summary] buildOp [] (opDef "StatsAggregatorSetSummaryWriter" & op'options & opInputs .~ op'inputs) {- input_arg { name: "stats_aggregator" type: DT_RESOURCE } input_arg { name: "summary" type: DT_RESOURCE } -} -- | stopGradient :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ stopGradient = stopGradient' id stopGradient' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __output__ stopGradient' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "StopGradient" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } -} -- | stridedSlice :: forall v'1 v'2 v'3 v'4 t index . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] index) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 index -- ^ __begin__ -> Tensor v'3 index -- ^ __end__ -> Tensor v'4 index -- ^ __strides__ -> Tensor Build t -- ^ __output__ stridedSlice = stridedSlice' id stridedSlice' :: forall v'1 v'2 v'3 v'4 t index . 
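{- Editor's sketch (not generated code): a basic StridedSlice; the *_mask
   attrs default to 0 and can be set via stridedSlice'. 'm' is an assumed
   [2,4] Float matrix and TF.constant is TensorFlow.Ops.constant.

> sliced :: Tensor Build Float
> sliced = stridedSlice m
>     (TF.constant (TF.Shape [2]) [0, 0 :: Data.Int.Int32])  -- begin
>     (TF.constant (TF.Shape [2]) [2, 4 :: Data.Int.Int32])  -- end
>     (TF.constant (TF.Shape [2]) [1, 2 :: Data.Int.Int32])  -- strides: every 2nd column
-}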
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] index) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 index -- ^ __begin__ -> Tensor v'3 index -- ^ __end__ -> Tensor v'4 index -- ^ __strides__ -> Tensor Build t -- ^ __output__ stridedSlice' op'options input begin end strides | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs begin, buildInputs end, buildInputs strides] return (opDef "StridedSlice" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Index" .~ tensorType (undefined :: index) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "begin" type_attr: "Index" } input_arg { name: "end" type_attr: "Index" } input_arg { name: "strides" type_attr: "Index" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Index" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "begin_mask" type: "int" default_value { i: 0 } } attr { name: "end_mask" type: "int" default_value { i: 0 } } attr { name: "ellipsis_mask" type: "int" default_value { i: 0 } } attr { name: "new_axis_mask" type: "int" default_value { i: 0 } } attr { name: "shrink_axis_mask" type: "int" default_value { i: 0 } } -} -- | stridedSliceAssign :: forall v'2 v'3 v'4 v'5 t index m' . (MonadBuild m', TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] index) => Tensor Ref t -- ^ __ref__ -> Tensor v'2 index -- ^ __begin__ -> Tensor v'3 index -- ^ __end__ -> Tensor v'4 index -- ^ __strides__ -> Tensor v'5 t -- ^ __value__ -> m' (Tensor Ref t) -- ^ __output_ref__ stridedSliceAssign = stridedSliceAssign' id stridedSliceAssign' :: forall v'2 v'3 v'4 v'5 t index m' . (MonadBuild m', TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] index) => OpParams -> Tensor Ref t -- ^ __ref__ -> Tensor v'2 index -- ^ __begin__ -> Tensor v'3 index -- ^ __end__ -> Tensor v'4 index -- ^ __strides__ -> Tensor v'5 t -- ^ __value__ -> m' (Tensor Ref t) -- ^ __output_ref__ stridedSliceAssign' op'options ref begin end strides value | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs ref, buildInputs begin, buildInputs end, buildInputs strides, buildInputs value] buildOp [] (opDef "StridedSliceAssign" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Index" .~ tensorType (undefined :: index) & op'options & opInputs .~ op'inputs) {- input_arg { name: "ref" type_attr: "T" is_ref: true } input_arg { name: "begin" type_attr: "Index" } input_arg { name: "end" type_attr: "Index" } input_arg { name: "strides" type_attr: "Index" } input_arg { name: "value" type_attr: "T" } output_arg { name: "output_ref" type_attr: "T" is_ref: true } attr { name: "T" type: "type" } attr { name: "Index" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "begin_mask" type: "int" default_value { i: 0 } } attr { name: "end_mask" type: "int" default_value { i: 0 } } attr { name: "ellipsis_mask" type: "int" default_value { i: 0 } } attr { name: "new_axis_mask" type: "int" default_value { i: 0 } } attr { name: "shrink_axis_mask" type: "int" default_value { i: 0 } } -} -- | stridedSliceGrad :: forall v'1 v'2 v'3 v'4 v'5 t index . 
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] index) => Tensor v'1 index -- ^ __shape__ -> Tensor v'2 index -- ^ __begin__ -> Tensor v'3 index -- ^ __end__ -> Tensor v'4 index -- ^ __strides__ -> Tensor v'5 t -- ^ __dy__ -> Tensor Build t -- ^ __output__ stridedSliceGrad = stridedSliceGrad' id stridedSliceGrad' :: forall v'1 v'2 v'3 v'4 v'5 t index . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] index) => OpParams -> Tensor v'1 index -- ^ __shape__ -> Tensor v'2 index -- ^ __begin__ -> Tensor v'3 index -- ^ __end__ -> Tensor v'4 index -- ^ __strides__ -> Tensor v'5 t -- ^ __dy__ -> Tensor Build t -- ^ __output__ stridedSliceGrad' op'options shape begin end strides dy | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape, buildInputs begin, buildInputs end, buildInputs strides, buildInputs dy] return (opDef "StridedSliceGrad" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Index" .~ tensorType (undefined :: index) & op'options & opInputs .~ op'inputs) {- input_arg { name: "shape" type_attr: "Index" } input_arg { name: "begin" type_attr: "Index" } input_arg { name: "end" type_attr: "Index" } input_arg { name: "strides" type_attr: "Index" } input_arg { name: "dy" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Index" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "begin_mask" type: "int" default_value { i: 0 } } attr { name: "end_mask" type: "int" default_value { i: 0 } } attr { name: "ellipsis_mask" type: "int" default_value { i: 0 } } attr { name: "new_axis_mask" type: "int" default_value { i: 0 } } attr { name: "shrink_axis_mask" type: "int" default_value { i: 0 } } -} -- | stringFormat :: forall v'1 t . (TensorTypes t) => TensorList (v'1) t -- ^ __inputs__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ stringFormat = stringFormat' id stringFormat' :: forall v'1 t . 
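{- Editor's sketch (not generated code): StringFormat takes a heterogeneous
   TensorList, built with (:/) and Nil from TensorFlow.Types; the
   "template" and "placeholder" attrs default to "%s".

> msg :: Tensor Build Data.ByteString.ByteString
> msg = stringFormat'
>     (opAttr "template" .~ ("xs = %s" :: Data.ByteString.ByteString))
>     (TF.constant (TF.Shape [2]) [1, 2 :: Float] :/ Nil)
-}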
(TensorTypes t) => OpParams -> TensorList (v'1) t -- ^ __inputs__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ stringFormat' op'options inputs | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs] return (opDef "StringFormat" & opAttr "T" .~ fromTensorTypes (Proxy :: Proxy t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "inputs" type_list_attr: "T" } output_arg { name: "output" type: DT_STRING } attr { name: "T" type: "list(type)" has_minimum: true } attr { name: "template" type: "string" default_value { s: "%s" } } attr { name: "placeholder" type: "string" default_value { s: "%s" } } attr { name: "summarize" type: "int" default_value { i: 3 } } -} -- | stringJoin :: [Tensor v'1 Data.ByteString.ByteString] -- ^ __inputs__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ stringJoin = stringJoin' id stringJoin' :: OpParams -> [Tensor v'1 Data.ByteString.ByteString] -- ^ __inputs__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ stringJoin' op'options inputs | eqLengthGuard [("N", [("inputs", length inputs)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs] return (opDef "StringJoin" & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length inputs) :: Int64 {- input_arg { name: "inputs" type: DT_STRING number_attr: "N" } output_arg { name: "output" type: DT_STRING } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "separator" type: "string" default_value { s: "" } } -} -- | stringLength :: Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor Build Data.Int.Int32 -- ^ __output__ stringLength = stringLength' id stringLength' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor Build Data.Int.Int32 -- ^ __output__ stringLength' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "StringLength" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_STRING } output_arg { name: "output" type: DT_INT32 } attr { name: "unit" type: "string" default_value { s: "BYTE" } allowed_values { list { s: "BYTE" s: "UTF8_CHAR" } } } -} -- | stringLower :: Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ stringLower = stringLower' id stringLower' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ stringLower' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "StringLower" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_STRING } output_arg { name: "output" type: DT_STRING } attr { name: "encoding" type: "string" default_value { s: "" } } -} -- | stringSplit :: Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __delimiter__ -> (Tensor Build Data.Int.Int64, Tensor Build Data.ByteString.ByteString, Tensor Build Data.Int.Int64) -- ^ (__indices__, __values__, __shape__) -- -- * __indices__ -- -- * __values__ -- -- * __shape__ stringSplit = stringSplit' id stringSplit' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __delimiter__ -> (Tensor Build Data.Int.Int64, Tensor Build Data.ByteString.ByteString, Tensor Build 
Data.Int.Int64) -- ^ (__indices__, __values__, __shape__) -- -- * __indices__ -- -- * __values__ -- -- * __shape__ stringSplit' op'options input delimiter | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs delimiter] return (opDef "StringSplit" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_STRING } input_arg { name: "delimiter" type: DT_STRING } output_arg { name: "indices" type: DT_INT64 } output_arg { name: "values" type: DT_STRING } output_arg { name: "shape" type: DT_INT64 } attr { name: "skip_empty" type: "bool" default_value { b: true } } -} -- | stringSplitV2 :: Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __sep__ -> (Tensor Build Data.Int.Int64, Tensor Build Data.ByteString.ByteString, Tensor Build Data.Int.Int64) -- ^ (__indices__, __values__, __shape__) -- -- * __indices__ -- -- * __values__ -- -- * __shape__ stringSplitV2 = stringSplitV2' id stringSplitV2' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __sep__ -> (Tensor Build Data.Int.Int64, Tensor Build Data.ByteString.ByteString, Tensor Build Data.Int.Int64) -- ^ (__indices__, __values__, __shape__) -- -- * __indices__ -- -- * __values__ -- -- * __shape__ stringSplitV2' op'options input sep | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs sep] return (opDef "StringSplitV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_STRING } input_arg { name: "sep" type: DT_STRING } output_arg { name: "indices" type: DT_INT64 } output_arg { name: "values" type: DT_STRING } output_arg { name: "shape" type: DT_INT64 } attr { name: "maxsplit" type: "int" default_value { i: -1 } } -} -- | stringStrip :: Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ stringStrip = stringStrip' id stringStrip' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ stringStrip' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "StringStrip" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_STRING } output_arg { name: "output" type: DT_STRING } -} -- | stringToHashBucket :: Data.Int.Int64 -- ^ __num_buckets__ -> Tensor v'1 Data.ByteString.ByteString -- ^ __string_tensor__ -> Tensor Build Data.Int.Int64 -- ^ __output__ stringToHashBucket = stringToHashBucket' id stringToHashBucket' :: OpParams -> Data.Int.Int64 -- ^ __num_buckets__ -> Tensor v'1 Data.ByteString.ByteString -- ^ __string_tensor__ -> Tensor Build Data.Int.Int64 -- ^ __output__ stringToHashBucket' op'options num_buckets string_tensor | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs string_tensor] return (opDef "StringToHashBucket" & opAttr "num_buckets" .~ num_buckets & op'options & opInputs .~ op'inputs) {- input_arg { name: "string_tensor" type: DT_STRING } output_arg { name: "output" type: DT_INT64 } attr { name: "num_buckets" type: "int" has_minimum: true minimum: 1 } -} -- | stringToHashBucketFast :: Data.Int.Int64 -- ^ __num_buckets__ -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor Build Data.Int.Int64 -- ^ __output__ stringToHashBucketFast = stringToHashBucketFast' 
id stringToHashBucketFast' :: OpParams -> Data.Int.Int64 -- ^ __num_buckets__ -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor Build Data.Int.Int64 -- ^ __output__ stringToHashBucketFast' op'options num_buckets input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "StringToHashBucketFast" & opAttr "num_buckets" .~ num_buckets & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_STRING } output_arg { name: "output" type: DT_INT64 } attr { name: "num_buckets" type: "int" has_minimum: true minimum: 1 } -} -- | stringToHashBucketStrong :: Data.Int.Int64 -- ^ __num_buckets__ -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor Build Data.Int.Int64 -- ^ __output__ stringToHashBucketStrong = stringToHashBucketStrong' id stringToHashBucketStrong' :: OpParams -> Data.Int.Int64 -- ^ __num_buckets__ -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor Build Data.Int.Int64 -- ^ __output__ stringToHashBucketStrong' op'options num_buckets input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "StringToHashBucketStrong" & opAttr "num_buckets" .~ num_buckets & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_STRING } output_arg { name: "output" type: DT_INT64 } attr { name: "num_buckets" type: "int" has_minimum: true minimum: 1 } attr { name: "key" type: "list(int)" } -} -- | stringToNumber :: forall v'1 out_type . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] out_type) => Tensor v'1 Data.ByteString.ByteString -- ^ __string_tensor__ -> Tensor Build out_type -- ^ __output__ stringToNumber = stringToNumber' id stringToNumber' :: forall v'1 out_type . (OneOf '[Data.Int.Int32, Data.Int.Int64, Double, Float] out_type) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __string_tensor__ -> Tensor Build out_type -- ^ __output__ stringToNumber' op'options string_tensor | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs string_tensor] return (opDef "StringToNumber" & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "string_tensor" type: DT_STRING } output_arg { name: "output" type_attr: "out_type" } attr { name: "out_type" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } -} -- | stringUpper :: Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ stringUpper = stringUpper' id stringUpper' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ stringUpper' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "StringUpper" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_STRING } output_arg { name: "output" type: DT_STRING } attr { name: "encoding" type: "string" default_value { s: "" } } -} -- | sub :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ sub = sub' id sub' :: forall v'1 v'2 t . 
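{- Editor's sketch (not generated code): parsing strings elementwise with
   StringToNumber above; out_type is picked by type inference. String
   literals assume OverloadedStrings.

> nums :: Tensor Build Float
> nums = stringToNumber (TF.constant (TF.Shape [2]) ["1.5", "-2.25"])
-}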
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ sub' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "Sub" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_UINT16 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | substr :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor v'2 t -- ^ __pos__ -> Tensor v'3 t -- ^ __len__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ substr = substr' id substr' :: forall v'1 v'2 v'3 t . (OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor v'2 t -- ^ __pos__ -> Tensor v'3 t -- ^ __len__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ substr' op'options input pos len | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs pos, buildInputs len] return (opDef "Substr" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_STRING } input_arg { name: "pos" type_attr: "T" } input_arg { name: "len" type_attr: "T" } output_arg { name: "output" type: DT_STRING } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "unit" type: "string" default_value { s: "BYTE" } allowed_values { list { s: "BYTE" s: "UTF8_CHAR" } } } -} -- | sum :: forall v'1 v'2 t tidx . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 tidx -- ^ __reduction_indices__ -> Tensor Build t -- ^ __output__ sum = sum' id sum' :: forall v'1 v'2 t tidx . 
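{- Editor's sketch (not generated code): reducing with Sum; "keep_dims"
   defaults to false and is flipped here via the primed variant. 'm' is an
   assumed [2,3] Float matrix.

> rowSums :: Tensor Build Float
> rowSums = sum' (opAttr "keep_dims" .~ True) m
>     (TF.constant (TF.Shape [1]) [1 :: Data.Int.Int32])  -- reduce over axis 1
-}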
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tidx -- ^ __reduction_indices__ -> Tensor Build t -- ^ __output__ sum' op'options input reduction_indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs reduction_indices] return (opDef "Sum" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "reduction_indices" type_attr: "Tidx" } output_arg { name: "output" type_attr: "T" } attr { name: "keep_dims" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | summaryWriter :: forall m' . (MonadBuild m') => m' (Tensor Value ResourceHandle) -- ^ __writer__ summaryWriter = summaryWriter' id summaryWriter' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Value ResourceHandle) -- ^ __writer__ summaryWriter' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "SummaryWriter" & op'options & opInputs .~ op'inputs) {- output_arg { name: "writer" type: DT_RESOURCE } attr { name: "shared_name" type: "string" default_value { s: "" } } attr { name: "container" type: "string" default_value { s: "" } } -} -- | svd :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> (Tensor Build t, Tensor Build t, Tensor Build t) -- ^ (__s__, __u__, __v__) -- -- * __s__ -- -- * __u__ -- -- * __v__ svd = svd' id svd' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> (Tensor Build t, Tensor Build t, Tensor Build t) -- ^ (__s__, __u__, __v__) -- -- * __s__ -- -- * __u__ -- -- * __v__ svd' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Svd" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "s" type_attr: "T" } output_arg { name: "u" type_attr: "T" } output_arg { name: "v" type_attr: "T" } attr { name: "compute_uv" type: "bool" default_value { b: true } } attr { name: "full_matrices" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_HALF type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | switch :: forall v'1 v'2 t . 
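{- Editor's sketch (not generated code): Svd yields (s, u, v); u and v are
   only meaningful while compute_uv is true (the default). 'm' is an
   assumed Float matrix.

> (s, u, v) = svd' (opAttr "full_matrices" .~ True) m
-}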
(TensorType t) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 Bool -- ^ __pred__ -> (Tensor Build t, Tensor Build t) -- ^ (__output_false__, __output_true__) -- -- * __output_false__ -- -- * __output_true__ switch = switch' id switch' :: forall v'1 v'2 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 Bool -- ^ __pred__ -> (Tensor Build t, Tensor Build t) -- ^ (__output_false__, __output_true__) -- -- * __output_false__ -- -- * __output_true__ switch' op'options data' pred | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs pred] return (opDef "Switch" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "pred" type: DT_BOOL } output_arg { name: "output_false" type_attr: "T" } output_arg { name: "output_true" type_attr: "T" } attr { name: "T" type: "type" } -} -- | tFRecordDataset :: forall v'1 v'2 v'3 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __filenames__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __compression_type__ -> Tensor v'3 Data.Int.Int64 -- ^ __buffer_size__ -> m' (Tensor Value Variant) -- ^ __handle__ tFRecordDataset = tFRecordDataset' id tFRecordDataset' :: forall v'1 v'2 v'3 m' . (MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __filenames__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __compression_type__ -> Tensor v'3 Data.Int.Int64 -- ^ __buffer_size__ -> m' (Tensor Value Variant) -- ^ __handle__ tFRecordDataset' op'options filenames compression_type buffer_size | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs filenames, buildInputs compression_type, buildInputs buffer_size] buildOp [] (opDef "TFRecordDataset" & op'options & opInputs .~ op'inputs) {- input_arg { name: "filenames" type: DT_STRING } input_arg { name: "compression_type" type: DT_STRING } input_arg { name: "buffer_size" type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } -} -- | tFRecordReader :: forall m' . (MonadBuild m') => m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__ tFRecordReader = tFRecordReader' id tFRecordReader' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__ tFRecordReader' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "TFRecordReader" & op'options & opInputs .~ op'inputs) {- output_arg { name: "reader_handle" type: DT_STRING is_ref: true } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } attr { name: "compression_type" type: "string" default_value { s: "" } } -} -- | tFRecordReaderV2 :: forall m' . (MonadBuild m') => m' (Tensor Value ResourceHandle) -- ^ __reader_handle__ tFRecordReaderV2 = tFRecordReaderV2' id tFRecordReaderV2' :: forall m' . 
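{- Editor's sketch (not generated code, inside a MonadBuild do-block):
   wiring a TFRecordDataset; an empty compression_type means uncompressed.
   String literals assume OverloadedStrings.

> ds <- tFRecordDataset
>     (TF.constant (TF.Shape [1]) ["train.tfrecord"])         -- filenames
>     (TF.constant (TF.Shape []) [""])                        -- compression_type
>     (TF.constant (TF.Shape []) [262144 :: Data.Int.Int64])  -- buffer_size
-}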
(MonadBuild m') => OpParams -> m' (Tensor Value ResourceHandle) -- ^ __reader_handle__ tFRecordReaderV2' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "TFRecordReaderV2" & op'options & opInputs .~ op'inputs) {- output_arg { name: "reader_handle" type: DT_RESOURCE } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } attr { name: "compression_type" type: "string" default_value { s: "" } } -} -- | tPUCompilationResult :: Tensor Build Data.ByteString.ByteString -- ^ __output__ tPUCompilationResult = tPUCompilationResult' id tPUCompilationResult' :: OpParams -> Tensor Build Data.ByteString.ByteString -- ^ __output__ tPUCompilationResult' op'options | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] return (opDef "TPUCompilationResult" & op'options & opInputs .~ op'inputs) {- output_arg { name: "output" type: DT_STRING } -} -- | tPUEmbeddingActivations :: Data.Int.Int64 -- ^ __lookup_id__ -> Data.Int.Int64 -- ^ __table_id__ -> Tensor v'1 Float -- ^ __embedding_variable__ -> Tensor v'2 Float -- ^ __sliced_activations__ -> Tensor Build Float -- ^ __output__ tPUEmbeddingActivations = tPUEmbeddingActivations' id tPUEmbeddingActivations' :: OpParams -> Data.Int.Int64 -- ^ __lookup_id__ -> Data.Int.Int64 -- ^ __table_id__ -> Tensor v'1 Float -- ^ __embedding_variable__ -> Tensor v'2 Float -- ^ __sliced_activations__ -> Tensor Build Float -- ^ __output__ tPUEmbeddingActivations' op'options lookup_id table_id embedding_variable sliced_activations | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs embedding_variable, buildInputs sliced_activations] return (opDef "TPUEmbeddingActivations" & opAttr "lookup_id" .~ lookup_id & opAttr "table_id" .~ table_id & op'options & opInputs .~ op'inputs) {- input_arg { name: "embedding_variable" type: DT_FLOAT } input_arg { name: "sliced_activations" type: DT_FLOAT } output_arg { name: "output" type: DT_FLOAT } attr { name: "table_id" type: "int" has_minimum: true } attr { name: "lookup_id" type: "int" has_minimum: true } -} -- | tPUOrdinalSelector :: forall m' . (MonadBuild m') => m' (Tensor Value Data.Int.Int32) -- ^ __device_ordinals__ tPUOrdinalSelector = tPUOrdinalSelector' id tPUOrdinalSelector' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Value Data.Int.Int32) -- ^ __device_ordinals__ tPUOrdinalSelector' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "TPUOrdinalSelector" & op'options & opInputs .~ op'inputs) {- output_arg { name: "device_ordinals" type: DT_INT32 } -} -- | tPUReplicateMetadata :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_replicas__ -> m' (ControlNode) tPUReplicateMetadata = tPUReplicateMetadata' id tPUReplicateMetadata' :: forall m' . 
(MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_replicas__ -> m' (ControlNode) tPUReplicateMetadata' op'options num_replicas | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "TPUReplicateMetadata" & opAttr "num_replicas" .~ num_replicas & op'options & opInputs .~ op'inputs) {- attr { name: "num_replicas" type: "int" has_minimum: true } attr { name: "num_cores_per_replica" type: "int" default_value { i: 1 } } attr { name: "topology" type: "string" default_value { s: "" } } attr { name: "use_tpu" type: "bool" default_value { b: true } } attr { name: "device_assignment" type: "list(int)" default_value { list { } } } attr { name: "computation_shape" type: "list(int)" default_value { list { } } } attr { name: "host_compute_core" type: "list(string)" default_value { list { } } } attr { name: "padding_map" type: "list(string)" default_value { list { } } } attr { name: "step_marker_location" type: "string" default_value { s: "STEP_MARK_AT_ENTRY" } } -} -- | tPUReplicatedInput :: forall v'1 t . (TensorType t) => [Tensor v'1 t] -- ^ __inputs__ -> Tensor Build t -- ^ __output__ tPUReplicatedInput = tPUReplicatedInput' id tPUReplicatedInput' :: forall v'1 t . (TensorType t) => OpParams -> [Tensor v'1 t] -- ^ __inputs__ -> Tensor Build t -- ^ __output__ tPUReplicatedInput' op'options inputs | eqLengthGuard [("N", [("inputs", length inputs)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs] return (opDef "TPUReplicatedInput" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length inputs) :: Int64 {- input_arg { name: "inputs" type_attr: "T" number_attr: "N" } output_arg { name: "output" type_attr: "T" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } -} -- | tPUReplicatedOutput :: forall v'1 t . (TensorType t) => Data.Int.Int64 -- ^ __num_replicas__ -> Tensor v'1 t -- ^ __input__ -> [Tensor Build t] -- ^ __outputs__ tPUReplicatedOutput = tPUReplicatedOutput' id tPUReplicatedOutput' :: forall v'1 t . 
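{- Editor's sketch (not generated code): TPUReplicatedInput merges one
   tensor per replica into a single replicated value, and
   TPUReplicatedOutput splits it back into num_replicas per-replica
   tensors; t0 and t1 are assumed per-replica inputs.

> replicated = tPUReplicatedInput [t0, t1]
> perReplica = tPUReplicatedOutput 2 replicated :: [Tensor Build Float]
-}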
(TensorType t) => OpParams -> Data.Int.Int64 -- ^ __num_replicas__ -> Tensor v'1 t -- ^ __input__ -> [Tensor Build t] -- ^ __outputs__ tPUReplicatedOutput' op'options num_replicas input | eqLengthGuard [] = pureOp [num_replicas] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "TPUReplicatedOutput" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "num_replicas" .~ num_replicas & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "outputs" type_attr: "T" number_attr: "num_replicas" } attr { name: "num_replicas" type: "int" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } -} -- | takeDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __count__ -> Tensor Build Variant -- ^ __handle__ takeDataset = takeDataset' id takeDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __count__ -> Tensor Build Variant -- ^ __handle__ takeDataset' op'options output_types input_dataset count | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs count] return (opDef "TakeDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "count" type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | takeManySparseFromTensorsMap :: forall v'1 dtype m' . (MonadBuild m', TensorType dtype) => Tensor v'1 Data.Int.Int64 -- ^ __sparse_handles__ -> m' ((Tensor Value Data.Int.Int64, Tensor Value dtype, Tensor Value Data.Int.Int64)) -- ^ (__sparse_indices__, __sparse_values__, __sparse_shape__) -- -- * __sparse_indices__ -- -- * __sparse_values__ -- -- * __sparse_shape__ takeManySparseFromTensorsMap = takeManySparseFromTensorsMap' id takeManySparseFromTensorsMap' :: forall v'1 dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 Data.Int.Int64 -- ^ __sparse_handles__ -> m' ((Tensor Value Data.Int.Int64, Tensor Value dtype, Tensor Value Data.Int.Int64)) -- ^ (__sparse_indices__, __sparse_values__, __sparse_shape__) -- -- * __sparse_indices__ -- -- * __sparse_values__ -- -- * __sparse_shape__ takeManySparseFromTensorsMap' op'options sparse_handles | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sparse_handles] buildOp [] (opDef "TakeManySparseFromTensorsMap" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "sparse_handles" type: DT_INT64 } output_arg { name: "sparse_indices" type: DT_INT64 } output_arg { name: "sparse_values" type_attr: "dtype" } output_arg { name: "sparse_shape" type: DT_INT64 } attr { name: "dtype" type: "type" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | tan :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ tan = tan' id tan' :: forall v'1 t . 
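{- Editor's note: names such as abs, sum, tan and tanh clash with the
   Prelude, so client code normally imports this module qualified:

> import qualified TensorFlow.GenOps.Core as CoreOps
> y = CoreOps.tan (TF.constant (TF.Shape [3]) [0, 0.5, 1 :: Float])
-}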
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ tan' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Tan" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | tanh :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ tanh = tanh' id tanh' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ tanh' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Tanh" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | tanhGrad :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __y__ -> Tensor v'2 t -- ^ __dy__ -> Tensor Build t -- ^ __z__ tanhGrad = tanhGrad' id tanhGrad' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __y__ -> Tensor v'2 t -- ^ __dy__ -> Tensor Build t -- ^ __z__ tanhGrad' op'options y dy | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs y, buildInputs dy] return (opDef "TanhGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "y" type_attr: "T" } input_arg { name: "dy" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | temporaryVariable :: forall dtype m' . (MonadBuild m', TensorType dtype) => Shape -- ^ __shape__ -> m' (Tensor Ref dtype) -- ^ __ref__ temporaryVariable = temporaryVariable' id temporaryVariable' :: forall dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Shape -- ^ __shape__ -> m' (Tensor Ref dtype) -- ^ __ref__ temporaryVariable' op'options shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "TemporaryVariable" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- output_arg { name: "ref" type_attr: "dtype" is_ref: true } attr { name: "shape" type: "shape" } attr { name: "dtype" type: "type" } attr { name: "var_name" type: "string" default_value { s: "" } } -} -- | tensorArray :: forall v'1 m' . 
(MonadBuild m') => DataType -- ^ __dtype__ -> Tensor v'1 Data.Int.Int32 -- ^ __size__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__ tensorArray = tensorArray' id tensorArray' :: forall v'1 m' . (MonadBuild m') => OpParams -> DataType -- ^ __dtype__ -> Tensor v'1 Data.Int.Int32 -- ^ __size__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __handle__ tensorArray' op'options dtype size | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs size] buildOp [] (opDef "TensorArray" & opAttr "dtype" .~ dtype & op'options & opInputs .~ op'inputs) {- input_arg { name: "size" type: DT_INT32 } output_arg { name: "handle" type: DT_STRING is_ref: true } attr { name: "dtype" type: "type" } attr { name: "dynamic_size" type: "bool" default_value { b: false } } attr { name: "clear_after_read" type: "bool" default_value { b: true } } attr { name: "tensor_array_name" type: "string" default_value { s: "" } } attr { name: "element_shape" type: "shape" default_value { shape { unknown_rank: true } } } -} -- | tensorArrayClose :: forall m' . (MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> m' (ControlNode) tensorArrayClose = tensorArrayClose' id tensorArrayClose' :: forall m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> m' (ControlNode) tensorArrayClose' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "TensorArrayClose" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } -} -- | tensorArrayCloseV2 :: forall v'1 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> m' (ControlNode) tensorArrayCloseV2 = tensorArrayCloseV2' id tensorArrayCloseV2' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> m' (ControlNode) tensorArrayCloseV2' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "TensorArrayCloseV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING } -} -- | tensorArrayCloseV3 :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __handle__ -> m' (ControlNode) tensorArrayCloseV3 = tensorArrayCloseV3' id tensorArrayCloseV3' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__ -> m' (ControlNode) tensorArrayCloseV3' op'options handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle] buildOp [] (opDef "TensorArrayCloseV3" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_RESOURCE } -} -- | tensorArrayConcat :: forall v'2 dtype m' . (MonadBuild m', TensorType dtype) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' ((Tensor Value dtype, Tensor Value Data.Int.Int64)) -- ^ (__value__, __lengths__) -- -- * __value__ -- -- * __lengths__ tensorArrayConcat = tensorArrayConcat' id tensorArrayConcat' :: forall v'2 dtype m' . 
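{- Editor's sketch (not generated code, inside a MonadBuild do-block): the
   V1 TensorArray lifecycle with the string-handle ops above; DT_FLOAT is
   the proto DataType enum available through this module's imports.

> do handle <- tensorArray DT_FLOAT (TF.constant (TF.Shape []) [3 :: Data.Int.Int32])
>    -- ... reads/writes, ordered through flow tensors ...
>    tensorArrayClose handle
-}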
(MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' ((Tensor Value dtype, Tensor Value Data.Int.Int64)) -- ^ (__value__, __lengths__) -- -- * __value__ -- -- * __lengths__ tensorArrayConcat' op'options handle flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs flow_in] buildOp [] (opDef "TensorArrayConcat" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "value" type_attr: "dtype" } output_arg { name: "lengths" type: DT_INT64 } attr { name: "dtype" type: "type" } attr { name: "element_shape_except0" type: "shape" default_value { shape { unknown_rank: true } } } -} -- | tensorArrayConcatV2 :: forall v'1 v'2 dtype . (TensorType dtype) => Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> (Tensor Build dtype, Tensor Build Data.Int.Int64) -- ^ (__value__, __lengths__) -- -- * __value__ -- -- * __lengths__ tensorArrayConcatV2 = tensorArrayConcatV2' id tensorArrayConcatV2' :: forall v'1 v'2 dtype . (TensorType dtype) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> (Tensor Build dtype, Tensor Build Data.Int.Int64) -- ^ (__value__, __lengths__) -- -- * __value__ -- -- * __lengths__ tensorArrayConcatV2' op'options handle flow_in | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs flow_in] return (opDef "TensorArrayConcatV2" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "value" type_attr: "dtype" } output_arg { name: "lengths" type: DT_INT64 } attr { name: "dtype" type: "type" } attr { name: "element_shape_except0" type: "shape" default_value { shape { unknown_rank: true } } } -} -- | tensorArrayConcatV3 :: forall v'1 v'2 dtype m' . (MonadBuild m', TensorType dtype) => Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' ((Tensor Value dtype, Tensor Value Data.Int.Int64)) -- ^ (__value__, __lengths__) -- -- * __value__ -- -- * __lengths__ tensorArrayConcatV3 = tensorArrayConcatV3' id tensorArrayConcatV3' :: forall v'1 v'2 dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' ((Tensor Value dtype, Tensor Value Data.Int.Int64)) -- ^ (__value__, __lengths__) -- -- * __value__ -- -- * __lengths__ tensorArrayConcatV3' op'options handle flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs flow_in] buildOp [] (opDef "TensorArrayConcatV3" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_RESOURCE } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "value" type_attr: "dtype" } output_arg { name: "lengths" type: DT_INT64 } attr { name: "dtype" type: "type" } attr { name: "element_shape_except0" type: "shape" default_value { shape { unknown_rank: true } } } -} -- | tensorArrayGather :: forall v'2 v'3 dtype m' . 
(MonadBuild m', TensorType dtype) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 Float -- ^ __flow_in__ -> m' (Tensor Value dtype) -- ^ __value__ tensorArrayGather = tensorArrayGather' id tensorArrayGather' :: forall v'2 v'3 dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 Float -- ^ __flow_in__ -> m' (Tensor Value dtype) -- ^ __value__ tensorArrayGather' op'options handle indices flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs indices, buildInputs flow_in] buildOp [] (opDef "TensorArrayGather" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "indices" type: DT_INT32 } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" } attr { name: "element_shape" type: "shape" default_value { shape { unknown_rank: true } } } -} -- | tensorArrayGatherV2 :: forall v'1 v'2 v'3 dtype . (TensorType dtype) => Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 Float -- ^ __flow_in__ -> Tensor Build dtype -- ^ __value__ tensorArrayGatherV2 = tensorArrayGatherV2' id tensorArrayGatherV2' :: forall v'1 v'2 v'3 dtype . (TensorType dtype) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 Float -- ^ __flow_in__ -> Tensor Build dtype -- ^ __value__ tensorArrayGatherV2' op'options handle indices flow_in | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs indices, buildInputs flow_in] return (opDef "TensorArrayGatherV2" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING } input_arg { name: "indices" type: DT_INT32 } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" } attr { name: "element_shape" type: "shape" default_value { shape { unknown_rank: true } } } -} -- | tensorArrayGatherV3 :: forall v'1 v'2 v'3 dtype m' . (MonadBuild m', TensorType dtype) => Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 Float -- ^ __flow_in__ -> m' (Tensor Value dtype) -- ^ __value__ tensorArrayGatherV3 = tensorArrayGatherV3' id tensorArrayGatherV3' :: forall v'1 v'2 v'3 dtype m' . 
(MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 Float -- ^ __flow_in__ -> m' (Tensor Value dtype) -- ^ __value__ tensorArrayGatherV3' op'options handle indices flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs indices, buildInputs flow_in] buildOp [] (opDef "TensorArrayGatherV3" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_RESOURCE } input_arg { name: "indices" type: DT_INT32 } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" } attr { name: "element_shape" type: "shape" default_value { shape { unknown_rank: true } } } -} -- | tensorArrayGrad :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __grad_handle__ tensorArrayGrad = tensorArrayGrad' id tensorArrayGrad' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __grad_handle__ tensorArrayGrad' op'options handle flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs flow_in] buildOp [] (opDef "TensorArrayGrad" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "grad_handle" type: DT_STRING is_ref: true } attr { name: "source" type: "string" } -} -- | tensorArrayGradV2 :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __grad_handle__ tensorArrayGradV2 = tensorArrayGradV2' id tensorArrayGradV2' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __grad_handle__ tensorArrayGradV2' op'options handle flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs flow_in] buildOp [] (opDef "TensorArrayGradV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "grad_handle" type: DT_STRING } attr { name: "source" type: "string" } -} -- | tensorArrayGradV3 :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' ((Tensor Value ResourceHandle, Tensor Value Float)) -- ^ (__grad_handle__, __flow_out__) -- -- * __grad_handle__ -- -- * __flow_out__ tensorArrayGradV3 = tensorArrayGradV3' id tensorArrayGradV3' :: forall v'1 v'2 m' . 
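{- Note on the TensorArrayGrad family above: per the op definitions, the
   "source" attr has no default value, so it must be supplied through the
   'OpParams' argument of the primed variants. A sketch (the attr value is
   just an illustrative gradient-source label):

       gradHandle <- tensorArrayGrad'
                         (opAttr "source" .~ ("gradients" :: ByteString))
                         handle flow_in
-}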
(MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' ((Tensor Value ResourceHandle, Tensor Value Float)) -- ^ (__grad_handle__, __flow_out__) -- -- * __grad_handle__ -- -- * __flow_out__ tensorArrayGradV3' op'options handle flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs flow_in] buildOp [] (opDef "TensorArrayGradV3" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_RESOURCE } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "grad_handle" type: DT_RESOURCE } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "source" type: "string" } -} -- | tensorArrayGradWithShape :: forall v'1 v'2 v'3 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> Tensor v'3 Data.Int.Int32 -- ^ __shape_to_prepend__ -> m' ((Tensor Value ResourceHandle, Tensor Value Float)) -- ^ (__grad_handle__, __flow_out__) -- -- * __grad_handle__ -- -- * __flow_out__ tensorArrayGradWithShape = tensorArrayGradWithShape' id tensorArrayGradWithShape' :: forall v'1 v'2 v'3 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> Tensor v'3 Data.Int.Int32 -- ^ __shape_to_prepend__ -> m' ((Tensor Value ResourceHandle, Tensor Value Float)) -- ^ (__grad_handle__, __flow_out__) -- -- * __grad_handle__ -- -- * __flow_out__ tensorArrayGradWithShape' op'options handle flow_in shape_to_prepend | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs flow_in, buildInputs shape_to_prepend] buildOp [] (opDef "TensorArrayGradWithShape" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_RESOURCE } input_arg { name: "flow_in" type: DT_FLOAT } input_arg { name: "shape_to_prepend" type: DT_INT32 } output_arg { name: "grad_handle" type: DT_RESOURCE } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "source" type: "string" } -} -- | tensorArrayPack :: forall v'2 dtype m' . (MonadBuild m', TensorType dtype) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' (Tensor Value dtype) -- ^ __value__ tensorArrayPack = tensorArrayPack' id tensorArrayPack' :: forall v'2 dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' (Tensor Value dtype) -- ^ __value__ tensorArrayPack' op'options handle flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs flow_in] buildOp [] (opDef "TensorArrayPack" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" } attr { name: "element_shape" type: "shape" default_value { shape { unknown_rank: true } } } -} -- | tensorArrayRead :: forall v'2 v'3 dtype m' . (MonadBuild m', TensorType dtype) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 Float -- ^ __flow_in__ -> m' (Tensor Value dtype) -- ^ __value__ tensorArrayRead = tensorArrayRead' id tensorArrayRead' :: forall v'2 v'3 dtype m' . 
(MonadBuild m', TensorType dtype) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 Float -- ^ __flow_in__ -> m' (Tensor Value dtype) -- ^ __value__ tensorArrayRead' op'options handle index flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs index, buildInputs flow_in] buildOp [] (opDef "TensorArrayRead" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "index" type: DT_INT32 } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" } -} -- | tensorArrayReadV2 :: forall v'1 v'2 v'3 dtype . (TensorType dtype) => Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 Float -- ^ __flow_in__ -> Tensor Build dtype -- ^ __value__ tensorArrayReadV2 = tensorArrayReadV2' id tensorArrayReadV2' :: forall v'1 v'2 v'3 dtype . (TensorType dtype) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 Float -- ^ __flow_in__ -> Tensor Build dtype -- ^ __value__ tensorArrayReadV2' op'options handle index flow_in | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs index, buildInputs flow_in] return (opDef "TensorArrayReadV2" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING } input_arg { name: "index" type: DT_INT32 } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" } -} -- | tensorArrayReadV3 :: forall v'1 v'2 v'3 dtype m' . (MonadBuild m', TensorType dtype) => Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 Float -- ^ __flow_in__ -> m' (Tensor Value dtype) -- ^ __value__ tensorArrayReadV3 = tensorArrayReadV3' id tensorArrayReadV3' :: forall v'1 v'2 v'3 dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 Float -- ^ __flow_in__ -> m' (Tensor Value dtype) -- ^ __value__ tensorArrayReadV3' op'options handle index flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs index, buildInputs flow_in] buildOp [] (opDef "TensorArrayReadV3" & opAttr "dtype" .~ tensorType (undefined :: dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_RESOURCE } input_arg { name: "index" type: DT_INT32 } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "value" type_attr: "dtype" } attr { name: "dtype" type: "type" } -} -- | tensorArrayScatter :: forall v'2 v'3 v'4 t m' . (MonadBuild m', TensorType t) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 t -- ^ __value__ -> Tensor v'4 Float -- ^ __flow_in__ -> m' (Tensor Value Float) -- ^ __flow_out__ tensorArrayScatter = tensorArrayScatter' id tensorArrayScatter' :: forall v'2 v'3 v'4 t m' . 
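{- Usage sketch for 'tensorArrayReadV3' above: read element 0 back out of a
   V3 TensorArray. The element type is chosen by the annotation on the result,
   which sets the "dtype" attr; 'scalar' is an assumed helper in the style of
   TensorFlow.Ops, as before.

       x <- tensorArrayReadV3 handle (scalar (0 :: Int32)) flow
              :: m (Tensor Value Float)
-}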
(MonadBuild m', TensorType t) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 t -- ^ __value__ -> Tensor v'4 Float -- ^ __flow_in__ -> m' (Tensor Value Float) -- ^ __flow_out__ tensorArrayScatter' op'options handle indices value flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs indices, buildInputs value, buildInputs flow_in] buildOp [] (opDef "TensorArrayScatter" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "indices" type: DT_INT32 } input_arg { name: "value" type_attr: "T" } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "T" type: "type" } -} -- | tensorArrayScatterV2 :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 t -- ^ __value__ -> Tensor v'4 Float -- ^ __flow_in__ -> Tensor Build Float -- ^ __flow_out__ tensorArrayScatterV2 = tensorArrayScatterV2' id tensorArrayScatterV2' :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 t -- ^ __value__ -> Tensor v'4 Float -- ^ __flow_in__ -> Tensor Build Float -- ^ __flow_out__ tensorArrayScatterV2' op'options handle indices value flow_in | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs indices, buildInputs value, buildInputs flow_in] return (opDef "TensorArrayScatterV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING } input_arg { name: "indices" type: DT_INT32 } input_arg { name: "value" type_attr: "T" } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "T" type: "type" } -} -- | tensorArrayScatterV3 :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', TensorType t) => Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 t -- ^ __value__ -> Tensor v'4 Float -- ^ __flow_in__ -> m' (Tensor Value Float) -- ^ __flow_out__ tensorArrayScatterV3 = tensorArrayScatterV3' id tensorArrayScatterV3' :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 t -- ^ __value__ -> Tensor v'4 Float -- ^ __flow_in__ -> m' (Tensor Value Float) -- ^ __flow_out__ tensorArrayScatterV3' op'options handle indices value flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs indices, buildInputs value, buildInputs flow_in] buildOp [] (opDef "TensorArrayScatterV3" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_RESOURCE } input_arg { name: "indices" type: DT_INT32 } input_arg { name: "value" type_attr: "T" } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "T" type: "type" } -} -- | tensorArraySize :: forall v'2 m' . 
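{- Usage sketch for 'tensorArrayScatterV3' above: row i of 'values' is written
   to the array element named by the i-th entry of 'indices', and the updated
   flow value must be threaded into any later reads. 'vector' is an assumed
   helper in the style of TensorFlow.Ops.

       flow' <- tensorArrayScatterV3 handle
                    (vector [0, 1 :: Int32])  -- target elements
                    values                    -- shape [2, ...]
                    flow
-}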
(MonadBuild m') => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' (Tensor Value Data.Int.Int32) -- ^ __size__ tensorArraySize = tensorArraySize' id tensorArraySize' :: forall v'2 m' . (MonadBuild m') => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' (Tensor Value Data.Int.Int32) -- ^ __size__ tensorArraySize' op'options handle flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs flow_in] buildOp [] (opDef "TensorArraySize" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "size" type: DT_INT32 } -} -- | tensorArraySizeV2 :: Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> Tensor Build Data.Int.Int32 -- ^ __size__ tensorArraySizeV2 = tensorArraySizeV2' id tensorArraySizeV2' :: OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> Tensor Build Data.Int.Int32 -- ^ __size__ tensorArraySizeV2' op'options handle flow_in | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs flow_in] return (opDef "TensorArraySizeV2" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "size" type: DT_INT32 } -} -- | tensorArraySizeV3 :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' (Tensor Value Data.Int.Int32) -- ^ __size__ tensorArraySizeV3 = tensorArraySizeV3' id tensorArraySizeV3' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 Float -- ^ __flow_in__ -> m' (Tensor Value Data.Int.Int32) -- ^ __size__ tensorArraySizeV3' op'options handle flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs flow_in] buildOp [] (opDef "TensorArraySizeV3" & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_RESOURCE } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "size" type: DT_INT32 } -} -- | tensorArraySplit :: forall v'2 v'3 v'4 t m' . (MonadBuild m', TensorType t) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 t -- ^ __value__ -> Tensor v'3 Data.Int.Int64 -- ^ __lengths__ -> Tensor v'4 Float -- ^ __flow_in__ -> m' (Tensor Value Float) -- ^ __flow_out__ tensorArraySplit = tensorArraySplit' id tensorArraySplit' :: forall v'2 v'3 v'4 t m' . 
(MonadBuild m', TensorType t) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 t -- ^ __value__ -> Tensor v'3 Data.Int.Int64 -- ^ __lengths__ -> Tensor v'4 Float -- ^ __flow_in__ -> m' (Tensor Value Float) -- ^ __flow_out__ tensorArraySplit' op'options handle value lengths flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs value, buildInputs lengths, buildInputs flow_in] buildOp [] (opDef "TensorArraySplit" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "value" type_attr: "T" } input_arg { name: "lengths" type: DT_INT64 } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "T" type: "type" } -} -- | tensorArraySplitV2 :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 t -- ^ __value__ -> Tensor v'3 Data.Int.Int64 -- ^ __lengths__ -> Tensor v'4 Float -- ^ __flow_in__ -> Tensor Build Float -- ^ __flow_out__ tensorArraySplitV2 = tensorArraySplitV2' id tensorArraySplitV2' :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 t -- ^ __value__ -> Tensor v'3 Data.Int.Int64 -- ^ __lengths__ -> Tensor v'4 Float -- ^ __flow_in__ -> Tensor Build Float -- ^ __flow_out__ tensorArraySplitV2' op'options handle value lengths flow_in | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs value, buildInputs lengths, buildInputs flow_in] return (opDef "TensorArraySplitV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING } input_arg { name: "value" type_attr: "T" } input_arg { name: "lengths" type: DT_INT64 } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "T" type: "type" } -} -- | tensorArraySplitV3 :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', TensorType t) => Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 t -- ^ __value__ -> Tensor v'3 Data.Int.Int64 -- ^ __lengths__ -> Tensor v'4 Float -- ^ __flow_in__ -> m' (Tensor Value Float) -- ^ __flow_out__ tensorArraySplitV3 = tensorArraySplitV3' id tensorArraySplitV3' :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 t -- ^ __value__ -> Tensor v'3 Data.Int.Int64 -- ^ __lengths__ -> Tensor v'4 Float -- ^ __flow_in__ -> m' (Tensor Value Float) -- ^ __flow_out__ tensorArraySplitV3' op'options handle value lengths flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs value, buildInputs lengths, buildInputs flow_in] buildOp [] (opDef "TensorArraySplitV3" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_RESOURCE } input_arg { name: "value" type_attr: "T" } input_arg { name: "lengths" type: DT_INT64 } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "T" type: "type" } -} -- | tensorArrayUnpack :: forall v'2 v'3 t m' . 
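{- Usage sketch for 'tensorArraySplitV3' above, the inverse of concat: 'value'
   is split along dimension 0 into pieces of the given lengths, one piece per
   array element ([2, 3] below splits a length-5 leading dimension into two
   elements).

       flow' <- tensorArraySplitV3 handle value (vector [2, 3 :: Int64]) flow
-}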
(MonadBuild m', TensorType t) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 t -- ^ __value__ -> Tensor v'3 Float -- ^ __flow_in__ -> m' (Tensor Value Float) -- ^ __flow_out__ tensorArrayUnpack = tensorArrayUnpack' id tensorArrayUnpack' :: forall v'2 v'3 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 t -- ^ __value__ -> Tensor v'3 Float -- ^ __flow_in__ -> m' (Tensor Value Float) -- ^ __flow_out__ tensorArrayUnpack' op'options handle value flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs value, buildInputs flow_in] buildOp [] (opDef "TensorArrayUnpack" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "value" type_attr: "T" } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "T" type: "type" } -} -- | tensorArrayV2 :: forall v'1 m' . (MonadBuild m') => DataType -- ^ __dtype__ -> Tensor v'1 Data.Int.Int32 -- ^ __size__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __handle__ tensorArrayV2 = tensorArrayV2' id tensorArrayV2' :: forall v'1 m' . (MonadBuild m') => OpParams -> DataType -- ^ __dtype__ -> Tensor v'1 Data.Int.Int32 -- ^ __size__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __handle__ tensorArrayV2' op'options dtype size | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs size] buildOp [] (opDef "TensorArrayV2" & opAttr "dtype" .~ dtype & op'options & opInputs .~ op'inputs) {- input_arg { name: "size" type: DT_INT32 } output_arg { name: "handle" type: DT_STRING } attr { name: "dtype" type: "type" } attr { name: "element_shape" type: "shape" default_value { shape { unknown_rank: true } } } attr { name: "dynamic_size" type: "bool" default_value { b: false } } attr { name: "clear_after_read" type: "bool" default_value { b: true } } attr { name: "tensor_array_name" type: "string" default_value { s: "" } } -} -- | tensorArrayV3 :: forall v'1 m' . (MonadBuild m') => DataType -- ^ __dtype__ -> Tensor v'1 Data.Int.Int32 -- ^ __size__ -> m' ((Tensor Value ResourceHandle, Tensor Value Float)) -- ^ (__handle__, __flow__) -- -- * __handle__ -- -- * __flow__ tensorArrayV3 = tensorArrayV3' id tensorArrayV3' :: forall v'1 m' . (MonadBuild m') => OpParams -> DataType -- ^ __dtype__ -> Tensor v'1 Data.Int.Int32 -- ^ __size__ -> m' ((Tensor Value ResourceHandle, Tensor Value Float)) -- ^ (__handle__, __flow__) -- -- * __handle__ -- -- * __flow__ tensorArrayV3' op'options dtype size | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs size] buildOp [] (opDef "TensorArrayV3" & opAttr "dtype" .~ dtype & op'options & opInputs .~ op'inputs) {- input_arg { name: "size" type: DT_INT32 } output_arg { name: "handle" type: DT_RESOURCE } output_arg { name: "flow" type: DT_FLOAT } attr { name: "dtype" type: "type" } attr { name: "element_shape" type: "shape" default_value { shape { unknown_rank: true } } } attr { name: "dynamic_size" type: "bool" default_value { b: false } } attr { name: "clear_after_read" type: "bool" default_value { b: true } } attr { name: "identical_element_shapes" type: "bool" default_value { b: false } } attr { name: "tensor_array_name" type: "string" default_value { s: "" } } -} -- | tensorArrayWrite :: forall v'2 v'3 v'4 t m' . 
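{- Usage sketch for 'tensorArrayV3' above: create a resource-based
   TensorArray. Optional attrs such as "dynamic_size" are passed through the
   'OpParams' of the primed variant; 'DT_FLOAT' and 'scalar' are assumed as in
   the earlier sketches.

       (handle, flow) <- tensorArrayV3' (opAttr "dynamic_size" .~ True)
                                        DT_FLOAT
                                        (scalar (0 :: Int32))
-}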
(MonadBuild m', TensorType t) => Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 t -- ^ __value__ -> Tensor v'4 Float -- ^ __flow_in__ -> m' (Tensor Value Float) -- ^ __flow_out__ tensorArrayWrite = tensorArrayWrite' id tensorArrayWrite' :: forall v'2 v'3 v'4 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor Ref Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 t -- ^ __value__ -> Tensor v'4 Float -- ^ __flow_in__ -> m' (Tensor Value Float) -- ^ __flow_out__ tensorArrayWrite' op'options handle index value flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs index, buildInputs value, buildInputs flow_in] buildOp [] (opDef "TensorArrayWrite" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING is_ref: true } input_arg { name: "index" type: DT_INT32 } input_arg { name: "value" type_attr: "T" } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "T" type: "type" } -} -- | tensorArrayWriteV2 :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 t -- ^ __value__ -> Tensor v'4 Float -- ^ __flow_in__ -> Tensor Build Float -- ^ __flow_out__ tensorArrayWriteV2 = tensorArrayWriteV2' id tensorArrayWriteV2' :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 t -- ^ __value__ -> Tensor v'4 Float -- ^ __flow_in__ -> Tensor Build Float -- ^ __flow_out__ tensorArrayWriteV2' op'options handle index value flow_in | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs index, buildInputs value, buildInputs flow_in] return (opDef "TensorArrayWriteV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_STRING } input_arg { name: "index" type: DT_INT32 } input_arg { name: "value" type_attr: "T" } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "T" type: "type" } -} -- | tensorArrayWriteV3 :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', TensorType t) => Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 t -- ^ __value__ -> Tensor v'4 Float -- ^ __flow_in__ -> m' (Tensor Value Float) -- ^ __flow_out__ tensorArrayWriteV3 = tensorArrayWriteV3' id tensorArrayWriteV3' :: forall v'1 v'2 v'3 v'4 t m' . 
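{- Usage sketch: a write/read round trip through a V3 TensorArray using
   'tensorArrayWriteV3' above, threading the flow value so the read is ordered
   after the write (names and helpers as in the earlier sketches).

       roundTrip = do
           (handle, flow) <- tensorArrayV3 DT_FLOAT (scalar (1 :: Int32))
           flow' <- tensorArrayWriteV3 handle (scalar 0) value flow
           tensorArrayReadV3 handle (scalar (0 :: Int32)) flow'
             :: m (Tensor Value Float)
-}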
(MonadBuild m', TensorType t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 t -- ^ __value__ -> Tensor v'4 Float -- ^ __flow_in__ -> m' (Tensor Value Float) -- ^ __flow_out__ tensorArrayWriteV3' op'options handle index value flow_in | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs handle, buildInputs index, buildInputs value, buildInputs flow_in] buildOp [] (opDef "TensorArrayWriteV3" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "handle" type: DT_RESOURCE } input_arg { name: "index" type: DT_INT32 } input_arg { name: "value" type_attr: "T" } input_arg { name: "flow_in" type: DT_FLOAT } output_arg { name: "flow_out" type: DT_FLOAT } attr { name: "T" type: "type" } -} -- | tensorDataset :: forall v'1 toutput_types m' . (MonadBuild m', TensorTypes toutput_types) => TensorList (v'1) toutput_types -- ^ __components__ -> m' (Tensor Value Variant) -- ^ __handle__ tensorDataset = tensorDataset' id tensorDataset' :: forall v'1 toutput_types m' . (MonadBuild m', TensorTypes toutput_types) => OpParams -> TensorList (v'1) toutput_types -- ^ __components__ -> m' (Tensor Value Variant) -- ^ __handle__ tensorDataset' op'options components | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs components] buildOp [] (opDef "TensorDataset" & opAttr "Toutput_types" .~ fromTensorTypes (Proxy :: Proxy toutput_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "components" type_list_attr: "Toutput_types" } output_arg { name: "handle" type: DT_VARIANT } attr { name: "Toutput_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | tensorForestCreateTreeVariable :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __tree_handle__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __tree_config__ -> m' (ControlNode) tensorForestCreateTreeVariable = tensorForestCreateTreeVariable' id tensorForestCreateTreeVariable' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __tree_handle__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __tree_config__ -> m' (ControlNode) tensorForestCreateTreeVariable' op'options tree_handle tree_config | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tree_handle, buildInputs tree_config] buildOp [] (opDef "TensorForestCreateTreeVariable" & op'options & opInputs .~ op'inputs) {- input_arg { name: "tree_handle" type: DT_RESOURCE } input_arg { name: "tree_config" type: DT_STRING } -} -- | tensorForestTreeDeserialize :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __tree_handle__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __tree_config__ -> m' (ControlNode) tensorForestTreeDeserialize = tensorForestTreeDeserialize' id tensorForestTreeDeserialize' :: forall v'1 v'2 m' . 
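{- Note on 'tensorDataset' above: the "Toutput_types" attr is inferred from
   the type of the 'TensorList' of components, but per the op definition the
   "output_shapes" attr has no default value, so it has to be supplied through
   the 'OpParams' of 'tensorDataset''.
-}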
(MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __tree_handle__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __tree_config__ -> m' (ControlNode) tensorForestTreeDeserialize' op'options tree_handle tree_config | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tree_handle, buildInputs tree_config] buildOp [] (opDef "TensorForestTreeDeserialize" & op'options & opInputs .~ op'inputs) {- input_arg { name: "tree_handle" type: DT_RESOURCE } input_arg { name: "tree_config" type: DT_STRING } -} -- | tensorForestTreeIsInitializedOp :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __tree_handle__ -> m' (Tensor Value Bool) -- ^ __is_initialized__ tensorForestTreeIsInitializedOp = tensorForestTreeIsInitializedOp' id tensorForestTreeIsInitializedOp' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __tree_handle__ -> m' (Tensor Value Bool) -- ^ __is_initialized__ tensorForestTreeIsInitializedOp' op'options tree_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tree_handle] buildOp [] (opDef "TensorForestTreeIsInitializedOp" & op'options & opInputs .~ op'inputs) {- input_arg { name: "tree_handle" type: DT_RESOURCE } output_arg { name: "is_initialized" type: DT_BOOL } -} -- | tensorForestTreePredict :: forall v'1 v'2 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __logits_dimension__ -> Tensor v'1 ResourceHandle -- ^ __tree_handle__ -> Tensor v'2 Float -- ^ __dense_features__ -> m' (Tensor Value Float) -- ^ __logits__ tensorForestTreePredict = tensorForestTreePredict' id tensorForestTreePredict' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __logits_dimension__ -> Tensor v'1 ResourceHandle -- ^ __tree_handle__ -> Tensor v'2 Float -- ^ __dense_features__ -> m' (Tensor Value Float) -- ^ __logits__ tensorForestTreePredict' op'options logits_dimension tree_handle dense_features | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tree_handle, buildInputs dense_features] buildOp [] (opDef "TensorForestTreePredict" & opAttr "logits_dimension" .~ logits_dimension & op'options & opInputs .~ op'inputs) {- input_arg { name: "tree_handle" type: DT_RESOURCE } input_arg { name: "dense_features" type: DT_FLOAT } output_arg { name: "logits" type: DT_FLOAT } attr { name: "logits_dimension" type: "int" } -} -- | tensorForestTreeResourceHandleOp :: forall m' . (MonadBuild m') => m' (Tensor Value ResourceHandle) -- ^ __resource__ tensorForestTreeResourceHandleOp = tensorForestTreeResourceHandleOp' id tensorForestTreeResourceHandleOp' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Value ResourceHandle) -- ^ __resource__ tensorForestTreeResourceHandleOp' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "TensorForestTreeResourceHandleOp" & op'options & opInputs .~ op'inputs) {- output_arg { name: "resource" type: DT_RESOURCE } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | tensorForestTreeSerialize :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __tree_handle__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __tree_config__ tensorForestTreeSerialize = tensorForestTreeSerialize' id tensorForestTreeSerialize' :: forall v'1 m' . 
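{- Usage sketch for 'tensorForestTreePredict' above: the leading plain 'Int64'
   argument supplies the required "logits_dimension" attr, and
   'denseFeatures' stands for a Float feature matrix.

       logits <- tensorForestTreePredict 2 treeHandle denseFeatures
-}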
(MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __tree_handle__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __tree_config__ tensorForestTreeSerialize' op'options tree_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tree_handle] buildOp [] (opDef "TensorForestTreeSerialize" & op'options & opInputs .~ op'inputs) {- input_arg { name: "tree_handle" type: DT_RESOURCE } output_arg { name: "tree_config" type: DT_STRING } -} -- | tensorForestTreeSize :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __tree_handle__ -> m' (Tensor Value Data.Int.Int32) -- ^ __tree_size__ tensorForestTreeSize = tensorForestTreeSize' id tensorForestTreeSize' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __tree_handle__ -> m' (Tensor Value Data.Int.Int32) -- ^ __tree_size__ tensorForestTreeSize' op'options tree_handle | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tree_handle] buildOp [] (opDef "TensorForestTreeSize" & op'options & opInputs .~ op'inputs) {- input_arg { name: "tree_handle" type: DT_RESOURCE } output_arg { name: "tree_size" type: DT_INT32 } -} -- | tensorListConcat :: forall v'1 element_dtype . (TensorType element_dtype) => Tensor v'1 Variant -- ^ __input_handle__ -> (Tensor Build element_dtype, Tensor Build Data.Int.Int64) -- ^ (__tensor__, __lengths__) -- -- * __tensor__ -- -- * __lengths__ tensorListConcat = tensorListConcat' id tensorListConcat' :: forall v'1 element_dtype . (TensorType element_dtype) => OpParams -> Tensor v'1 Variant -- ^ __input_handle__ -> (Tensor Build element_dtype, Tensor Build Data.Int.Int64) -- ^ (__tensor__, __lengths__) -- -- * __tensor__ -- -- * __lengths__ tensorListConcat' op'options input_handle | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_handle] return (opDef "TensorListConcat" & opAttr "element_dtype" .~ tensorType (undefined :: element_dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_handle" type: DT_VARIANT } output_arg { name: "tensor" type_attr: "element_dtype" } output_arg { name: "lengths" type: DT_INT64 } attr { name: "element_dtype" type: "type" } attr { name: "element_shape" type: "shape" default_value { shape { unknown_rank: true } } } -} -- | tensorListConcatLists :: DataType -- ^ __element_dtype__ -> Tensor v'1 Variant -- ^ __input_a__ -> Tensor v'2 Variant -- ^ __input_b__ -> Tensor Build Variant -- ^ __output__ tensorListConcatLists = tensorListConcatLists' id tensorListConcatLists' :: OpParams -> DataType -- ^ __element_dtype__ -> Tensor v'1 Variant -- ^ __input_a__ -> Tensor v'2 Variant -- ^ __input_b__ -> Tensor Build Variant -- ^ __output__ tensorListConcatLists' op'options element_dtype input_a input_b | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_a, buildInputs input_b] return (opDef "TensorListConcatLists" & opAttr "element_dtype" .~ element_dtype & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_a" type: DT_VARIANT } input_arg { name: "input_b" type: DT_VARIANT } output_arg { name: "output" type: DT_VARIANT } attr { name: "element_dtype" type: "type" } -} -- | tensorListConcatV2 :: forall v'1 v'2 v'3 element_dtype shape_type . 
(TensorType element_dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] shape_type) => Tensor v'1 Variant -- ^ __input_handle__ -> Tensor v'2 shape_type -- ^ __element_shape__ -> Tensor v'3 Data.Int.Int64 -- ^ __leading_dims__ -> (Tensor Build element_dtype, Tensor Build Data.Int.Int64) -- ^ (__tensor__, __lengths__) -- -- * __tensor__ -- -- * __lengths__ tensorListConcatV2 = tensorListConcatV2' id tensorListConcatV2' :: forall v'1 v'2 v'3 element_dtype shape_type . (TensorType element_dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] shape_type) => OpParams -> Tensor v'1 Variant -- ^ __input_handle__ -> Tensor v'2 shape_type -- ^ __element_shape__ -> Tensor v'3 Data.Int.Int64 -- ^ __leading_dims__ -> (Tensor Build element_dtype, Tensor Build Data.Int.Int64) -- ^ (__tensor__, __lengths__) -- -- * __tensor__ -- -- * __lengths__ tensorListConcatV2' op'options input_handle element_shape leading_dims | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_handle, buildInputs element_shape, buildInputs leading_dims] return (opDef "TensorListConcatV2" & opAttr "element_dtype" .~ tensorType (undefined :: element_dtype) & opAttr "shape_type" .~ tensorType (undefined :: shape_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_handle" type: DT_VARIANT } input_arg { name: "element_shape" type_attr: "shape_type" } input_arg { name: "leading_dims" type: DT_INT64 } output_arg { name: "tensor" type_attr: "element_dtype" } output_arg { name: "lengths" type: DT_INT64 } attr { name: "element_dtype" type: "type" } attr { name: "shape_type" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | tensorListElementShape :: forall v'1 shape_type . (OneOf '[Data.Int.Int32, Data.Int.Int64] shape_type) => Tensor v'1 Variant -- ^ __input_handle__ -> Tensor Build shape_type -- ^ __element_shape__ tensorListElementShape = tensorListElementShape' id tensorListElementShape' :: forall v'1 shape_type . (OneOf '[Data.Int.Int32, Data.Int.Int64] shape_type) => OpParams -> Tensor v'1 Variant -- ^ __input_handle__ -> Tensor Build shape_type -- ^ __element_shape__ tensorListElementShape' op'options input_handle | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_handle] return (opDef "TensorListElementShape" & opAttr "shape_type" .~ tensorType (undefined :: shape_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_handle" type: DT_VARIANT } output_arg { name: "element_shape" type_attr: "shape_type" } attr { name: "shape_type" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | tensorListFromTensor :: forall v'1 v'2 element_dtype shape_type . (TensorType element_dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] shape_type) => Tensor v'1 element_dtype -- ^ __tensor__ -> Tensor v'2 shape_type -- ^ __element_shape__ -> Tensor Build Variant -- ^ __output_handle__ tensorListFromTensor = tensorListFromTensor' id tensorListFromTensor' :: forall v'1 v'2 element_dtype shape_type . 
(TensorType element_dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] shape_type) => OpParams -> Tensor v'1 element_dtype -- ^ __tensor__ -> Tensor v'2 shape_type -- ^ __element_shape__ -> Tensor Build Variant -- ^ __output_handle__ tensorListFromTensor' op'options tensor element_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor, buildInputs element_shape] return (opDef "TensorListFromTensor" & opAttr "element_dtype" .~ tensorType (undefined :: element_dtype) & opAttr "shape_type" .~ tensorType (undefined :: shape_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" type_attr: "element_dtype" } input_arg { name: "element_shape" type_attr: "shape_type" } output_arg { name: "output_handle" type: DT_VARIANT } attr { name: "element_dtype" type: "type" } attr { name: "shape_type" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | tensorListGather :: forall v'1 v'2 v'3 element_dtype . (TensorType element_dtype) => Tensor v'1 Variant -- ^ __input_handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 Data.Int.Int32 -- ^ __element_shape__ -> Tensor Build element_dtype -- ^ __values__ tensorListGather = tensorListGather' id tensorListGather' :: forall v'1 v'2 v'3 element_dtype . (TensorType element_dtype) => OpParams -> Tensor v'1 Variant -- ^ __input_handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 Data.Int.Int32 -- ^ __element_shape__ -> Tensor Build element_dtype -- ^ __values__ tensorListGather' op'options input_handle indices element_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_handle, buildInputs indices, buildInputs element_shape] return (opDef "TensorListGather" & opAttr "element_dtype" .~ tensorType (undefined :: element_dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_handle" type: DT_VARIANT } input_arg { name: "indices" type: DT_INT32 } input_arg { name: "element_shape" type: DT_INT32 } output_arg { name: "values" type_attr: "element_dtype" } attr { name: "element_dtype" type: "type" } -} -- | tensorListGetItem :: forall v'1 v'2 v'3 element_dtype . (TensorType element_dtype) => Tensor v'1 Variant -- ^ __input_handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 Data.Int.Int32 -- ^ __element_shape__ -> Tensor Build element_dtype -- ^ __item__ tensorListGetItem = tensorListGetItem' id tensorListGetItem' :: forall v'1 v'2 v'3 element_dtype . 
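{- Usage sketch for 'tensorListFromTensor' / 'tensorListGather' above: split a
   [3, 2] Float tensor into a TensorList of three [2]-shaped elements, then
   stack two of them back along a new leading dimension. Element shapes are
   passed as shape tensors; 'vector' is an assumed helper as before.

       let list = tensorListFromTensor t (vector [2 :: Int32])
           vals = tensorListGather list (vector [0, 2 :: Int32])
                                        (vector [2 :: Int32])
                    :: Tensor Build Float
-}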
(TensorType element_dtype) => OpParams -> Tensor v'1 Variant -- ^ __input_handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 Data.Int.Int32 -- ^ __element_shape__ -> Tensor Build element_dtype -- ^ __item__ tensorListGetItem' op'options input_handle index element_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_handle, buildInputs index, buildInputs element_shape] return (opDef "TensorListGetItem" & opAttr "element_dtype" .~ tensorType (undefined :: element_dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_handle" type: DT_VARIANT } input_arg { name: "index" type: DT_INT32 } input_arg { name: "element_shape" type: DT_INT32 } output_arg { name: "item" type_attr: "element_dtype" } attr { name: "element_dtype" type: "type" } -} -- | tensorListLength :: Tensor v'1 Variant -- ^ __input_handle__ -> Tensor Build Data.Int.Int32 -- ^ __length__ tensorListLength = tensorListLength' id tensorListLength' :: OpParams -> Tensor v'1 Variant -- ^ __input_handle__ -> Tensor Build Data.Int.Int32 -- ^ __length__ tensorListLength' op'options input_handle | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_handle] return (opDef "TensorListLength" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_handle" type: DT_VARIANT } output_arg { name: "length" type: DT_INT32 } -} -- | tensorListPopBack :: forall v'1 v'2 element_dtype . (TensorType element_dtype) => Tensor v'1 Variant -- ^ __input_handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __element_shape__ -> (Tensor Build Variant, Tensor Build element_dtype) -- ^ (__output_handle__, __tensor__) -- -- * __output_handle__ -- -- * __tensor__ tensorListPopBack = tensorListPopBack' id tensorListPopBack' :: forall v'1 v'2 element_dtype . (TensorType element_dtype) => OpParams -> Tensor v'1 Variant -- ^ __input_handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __element_shape__ -> (Tensor Build Variant, Tensor Build element_dtype) -- ^ (__output_handle__, __tensor__) -- -- * __output_handle__ -- -- * __tensor__ tensorListPopBack' op'options input_handle element_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_handle, buildInputs element_shape] return (opDef "TensorListPopBack" & opAttr "element_dtype" .~ tensorType (undefined :: element_dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_handle" type: DT_VARIANT } input_arg { name: "element_shape" type: DT_INT32 } output_arg { name: "output_handle" type: DT_VARIANT } output_arg { name: "tensor" type_attr: "element_dtype" } attr { name: "element_dtype" type: "type" } -} -- | tensorListPushBack :: forall v'1 v'2 element_dtype . (TensorType element_dtype) => Tensor v'1 Variant -- ^ __input_handle__ -> Tensor v'2 element_dtype -- ^ __tensor__ -> Tensor Build Variant -- ^ __output_handle__ tensorListPushBack = tensorListPushBack' id tensorListPushBack' :: forall v'1 v'2 element_dtype . 
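{- Usage sketch for 'tensorListPopBack' above, the inverse of
   'tensorListPushBack' (defined just below): it returns the shortened list
   together with the removed last element.

       let (rest, top) = tensorListPopBack list (vector [2 :: Int32])
                           :: (Tensor Build Variant, Tensor Build Float)
-}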
(TensorType element_dtype) => OpParams -> Tensor v'1 Variant -- ^ __input_handle__ -> Tensor v'2 element_dtype -- ^ __tensor__ -> Tensor Build Variant -- ^ __output_handle__ tensorListPushBack' op'options input_handle tensor | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_handle, buildInputs tensor] return (opDef "TensorListPushBack" & opAttr "element_dtype" .~ tensorType (undefined :: element_dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_handle" type: DT_VARIANT } input_arg { name: "tensor" type_attr: "element_dtype" } output_arg { name: "output_handle" type: DT_VARIANT } attr { name: "element_dtype" type: "type" } -} -- | tensorListPushBackBatch :: forall v'1 v'2 element_dtype . (TensorType element_dtype) => Tensor v'1 Variant -- ^ __input_handles__ -> Tensor v'2 element_dtype -- ^ __tensor__ -> Tensor Build Variant -- ^ __output_handles__ tensorListPushBackBatch = tensorListPushBackBatch' id tensorListPushBackBatch' :: forall v'1 v'2 element_dtype . (TensorType element_dtype) => OpParams -> Tensor v'1 Variant -- ^ __input_handles__ -> Tensor v'2 element_dtype -- ^ __tensor__ -> Tensor Build Variant -- ^ __output_handles__ tensorListPushBackBatch' op'options input_handles tensor | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_handles, buildInputs tensor] return (opDef "TensorListPushBackBatch" & opAttr "element_dtype" .~ tensorType (undefined :: element_dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_handles" type: DT_VARIANT } input_arg { name: "tensor" type_attr: "element_dtype" } output_arg { name: "output_handles" type: DT_VARIANT } attr { name: "element_dtype" type: "type" } -} -- | tensorListReserve :: forall v'1 v'2 shape_type . (OneOf '[Data.Int.Int32, Data.Int.Int64] shape_type) => DataType -- ^ __element_dtype__ -> Tensor v'1 shape_type -- ^ __element_shape__ -> Tensor v'2 Data.Int.Int32 -- ^ __num_elements__ -> Tensor Build Variant -- ^ __handle__ tensorListReserve = tensorListReserve' id tensorListReserve' :: forall v'1 v'2 shape_type . (OneOf '[Data.Int.Int32, Data.Int.Int64] shape_type) => OpParams -> DataType -- ^ __element_dtype__ -> Tensor v'1 shape_type -- ^ __element_shape__ -> Tensor v'2 Data.Int.Int32 -- ^ __num_elements__ -> Tensor Build Variant -- ^ __handle__ tensorListReserve' op'options element_dtype element_shape num_elements | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs element_shape, buildInputs num_elements] return (opDef "TensorListReserve" & opAttr "shape_type" .~ tensorType (undefined :: shape_type) & opAttr "element_dtype" .~ element_dtype & op'options & opInputs .~ op'inputs) {- input_arg { name: "element_shape" type_attr: "shape_type" } input_arg { name: "num_elements" type: DT_INT32 } output_arg { name: "handle" type: DT_VARIANT } attr { name: "element_dtype" type: "type" } attr { name: "shape_type" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | tensorListResize :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 Variant -- ^ __input_handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __size__ -> m' (Tensor Value Variant) -- ^ __output_handle__ tensorListResize = tensorListResize' id tensorListResize' :: forall v'1 v'2 m' . 
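{- Usage sketch for 'tensorListReserve' / 'tensorListPushBack' above:
   'tensorListReserve' takes the element dtype as an explicit 'DataType'
   because there is no element tensor to infer it from, while
   'tensorListPushBack' appends one element and returns the grown list.

       let empty = tensorListReserve DT_FLOAT (vector [2 :: Int32])
                                     (scalar (4 :: Int32))
           list' = tensorListPushBack list element
-}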
(MonadBuild m') => OpParams -> Tensor v'1 Variant -- ^ __input_handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __size__ -> m' (Tensor Value Variant) -- ^ __output_handle__ tensorListResize' op'options input_handle size | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_handle, buildInputs size] buildOp [] (opDef "TensorListResize" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_handle" type: DT_VARIANT } input_arg { name: "size" type: DT_INT32 } output_arg { name: "output_handle" type: DT_VARIANT } -} -- | tensorListScatter :: forall v'1 v'2 v'3 element_dtype shape_type . (TensorType element_dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] shape_type) => Tensor v'1 element_dtype -- ^ __tensor__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 shape_type -- ^ __element_shape__ -> Tensor Build Variant -- ^ __output_handle__ tensorListScatter = tensorListScatter' id tensorListScatter' :: forall v'1 v'2 v'3 element_dtype shape_type . (TensorType element_dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] shape_type) => OpParams -> Tensor v'1 element_dtype -- ^ __tensor__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 shape_type -- ^ __element_shape__ -> Tensor Build Variant -- ^ __output_handle__ tensorListScatter' op'options tensor indices element_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor, buildInputs indices, buildInputs element_shape] return (opDef "TensorListScatter" & opAttr "element_dtype" .~ tensorType (undefined :: element_dtype) & opAttr "shape_type" .~ tensorType (undefined :: shape_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" type_attr: "element_dtype" } input_arg { name: "indices" type: DT_INT32 } input_arg { name: "element_shape" type_attr: "shape_type" } output_arg { name: "output_handle" type: DT_VARIANT } attr { name: "element_dtype" type: "type" } attr { name: "shape_type" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | tensorListScatterIntoExistingList :: forall v'1 v'2 v'3 element_dtype . (TensorType element_dtype) => Tensor v'1 Variant -- ^ __input_handle__ -> Tensor v'2 element_dtype -- ^ __tensor__ -> Tensor v'3 Data.Int.Int32 -- ^ __indices__ -> Tensor Build Variant -- ^ __output_handle__ tensorListScatterIntoExistingList = tensorListScatterIntoExistingList' id tensorListScatterIntoExistingList' :: forall v'1 v'2 v'3 element_dtype . (TensorType element_dtype) => OpParams -> Tensor v'1 Variant -- ^ __input_handle__ -> Tensor v'2 element_dtype -- ^ __tensor__ -> Tensor v'3 Data.Int.Int32 -- ^ __indices__ -> Tensor Build Variant -- ^ __output_handle__ tensorListScatterIntoExistingList' op'options input_handle tensor indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_handle, buildInputs tensor, buildInputs indices] return (opDef "TensorListScatterIntoExistingList" & opAttr "element_dtype" .~ tensorType (undefined :: element_dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_handle" type: DT_VARIANT } input_arg { name: "tensor" type_attr: "element_dtype" } input_arg { name: "indices" type: DT_INT32 } output_arg { name: "output_handle" type: DT_VARIANT } attr { name: "element_dtype" type: "type" } -} -- | tensorListScatterV2 :: forall v'1 v'2 v'3 v'4 element_dtype shape_type . 
(TensorType element_dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] shape_type) => Tensor v'1 element_dtype -- ^ __tensor__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 shape_type -- ^ __element_shape__ -> Tensor v'4 Data.Int.Int32 -- ^ __num_elements__ -> Tensor Build Variant -- ^ __output_handle__ tensorListScatterV2 = tensorListScatterV2' id tensorListScatterV2' :: forall v'1 v'2 v'3 v'4 element_dtype shape_type . (TensorType element_dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] shape_type) => OpParams -> Tensor v'1 element_dtype -- ^ __tensor__ -> Tensor v'2 Data.Int.Int32 -- ^ __indices__ -> Tensor v'3 shape_type -- ^ __element_shape__ -> Tensor v'4 Data.Int.Int32 -- ^ __num_elements__ -> Tensor Build Variant -- ^ __output_handle__ tensorListScatterV2' op'options tensor indices element_shape num_elements | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor, buildInputs indices, buildInputs element_shape, buildInputs num_elements] return (opDef "TensorListScatterV2" & opAttr "element_dtype" .~ tensorType (undefined :: element_dtype) & opAttr "shape_type" .~ tensorType (undefined :: shape_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" type_attr: "element_dtype" } input_arg { name: "indices" type: DT_INT32 } input_arg { name: "element_shape" type_attr: "shape_type" } input_arg { name: "num_elements" type: DT_INT32 } output_arg { name: "output_handle" type: DT_VARIANT } attr { name: "element_dtype" type: "type" } attr { name: "shape_type" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | tensorListSetItem :: forall v'1 v'2 v'3 element_dtype . (TensorType element_dtype) => Tensor v'1 Variant -- ^ __input_handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 element_dtype -- ^ __item__ -> Tensor Build Variant -- ^ __output_handle__ tensorListSetItem = tensorListSetItem' id tensorListSetItem' :: forall v'1 v'2 v'3 element_dtype . (TensorType element_dtype) => OpParams -> Tensor v'1 Variant -- ^ __input_handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __index__ -> Tensor v'3 element_dtype -- ^ __item__ -> Tensor Build Variant -- ^ __output_handle__ tensorListSetItem' op'options input_handle index item | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_handle, buildInputs index, buildInputs item] return (opDef "TensorListSetItem" & opAttr "element_dtype" .~ tensorType (undefined :: element_dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_handle" type: DT_VARIANT } input_arg { name: "index" type: DT_INT32 } input_arg { name: "item" type_attr: "element_dtype" } output_arg { name: "output_handle" type: DT_VARIANT } attr { name: "element_dtype" type: "type" } -} -- | tensorListSplit :: forall v'1 v'2 v'3 element_dtype shape_type . (TensorType element_dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] shape_type) => Tensor v'1 element_dtype -- ^ __tensor__ -> Tensor v'2 shape_type -- ^ __element_shape__ -> Tensor v'3 Data.Int.Int64 -- ^ __lengths__ -> Tensor Build Variant -- ^ __output_handle__ tensorListSplit = tensorListSplit' id tensorListSplit' :: forall v'1 v'2 v'3 element_dtype shape_type . 
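{- Usage sketch for 'tensorListSetItem' above (with 'tensorListGetItem' from
   earlier): list updates are functional, so 'tensorListSetItem' returns a
   fresh handle with element 1 replaced, and reading it back uses an
   annotation to fix "element_dtype".

       let list' = tensorListSetItem list (scalar (1 :: Int32)) item
           item' = tensorListGetItem list' (scalar (1 :: Int32))
                                     (vector [2 :: Int32])
                     :: Tensor Build Float
-}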
(TensorType element_dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] shape_type) => OpParams -> Tensor v'1 element_dtype -- ^ __tensor__ -> Tensor v'2 shape_type -- ^ __element_shape__ -> Tensor v'3 Data.Int.Int64 -- ^ __lengths__ -> Tensor Build Variant -- ^ __output_handle__ tensorListSplit' op'options tensor element_shape lengths | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor, buildInputs element_shape, buildInputs lengths] return (opDef "TensorListSplit" & opAttr "element_dtype" .~ tensorType (undefined :: element_dtype) & opAttr "shape_type" .~ tensorType (undefined :: shape_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" type_attr: "element_dtype" } input_arg { name: "element_shape" type_attr: "shape_type" } input_arg { name: "lengths" type: DT_INT64 } output_arg { name: "output_handle" type: DT_VARIANT } attr { name: "element_dtype" type: "type" } attr { name: "shape_type" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | tensorListStack :: forall v'1 v'2 element_dtype . (TensorType element_dtype) => Tensor v'1 Variant -- ^ __input_handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __element_shape__ -> Tensor Build element_dtype -- ^ __tensor__ tensorListStack = tensorListStack' id tensorListStack' :: forall v'1 v'2 element_dtype . (TensorType element_dtype) => OpParams -> Tensor v'1 Variant -- ^ __input_handle__ -> Tensor v'2 Data.Int.Int32 -- ^ __element_shape__ -> Tensor Build element_dtype -- ^ __tensor__ tensorListStack' op'options input_handle element_shape | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_handle, buildInputs element_shape] return (opDef "TensorListStack" & opAttr "element_dtype" .~ tensorType (undefined :: element_dtype) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_handle" type: DT_VARIANT } input_arg { name: "element_shape" type: DT_INT32 } output_arg { name: "tensor" type_attr: "element_dtype" } attr { name: "element_dtype" type: "type" } attr { name: "num_elements" type: "int" default_value { i: -1 } } -} -- | tensorScatterAdd :: forall v'1 v'2 v'3 t tindices . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 t -- ^ __tensor__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> Tensor Build t -- ^ __output__ tensorScatterAdd = tensorScatterAdd' id tensorScatterAdd' :: forall v'1 v'2 v'3 t tindices . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 t -- ^ __tensor__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> Tensor Build t -- ^ __output__ tensorScatterAdd' op'options tensor indices updates | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor, buildInputs indices, buildInputs updates] return (opDef "TensorScatterAdd" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | tensorScatterSub :: forall v'1 v'2 v'3 t tindices . 
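{- Usage sketch, continuing the 'toList' sketch above: tensorListStack turns
   the list back into one tensor, which should fetch as [3, 4, 1, 2].  An
   element_shape of -1 leaves the shape unconstrained, and the annotation on
   the op result fixes 'element_dtype'.  'run', 'Session' and 'Vector' are
   assumed from TensorFlow.Session and Data.Vector.

> stacked :: Session (Vector Float)
> stacked = run (tensorListStack toList (scalar (-1 :: Int32))
>                  :: Tensor Build Float)
-}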
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 t -- ^ __tensor__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> Tensor Build t -- ^ __output__ tensorScatterSub = tensorScatterSub' id tensorScatterSub' :: forall v'1 v'2 v'3 t tindices . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 t -- ^ __tensor__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> Tensor Build t -- ^ __output__ tensorScatterSub' op'options tensor indices updates | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor, buildInputs indices, buildInputs updates] return (opDef "TensorScatterSub" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | tensorScatterUpdate :: forall v'1 v'2 v'3 t tindices . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 t -- ^ __tensor__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> Tensor Build t -- ^ __output__ tensorScatterUpdate = tensorScatterUpdate' id tensorScatterUpdate' :: forall v'1 v'2 v'3 t tindices . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 t -- ^ __tensor__ -> Tensor v'2 tindices -- ^ __indices__ -> Tensor v'3 t -- ^ __updates__ -> Tensor Build t -- ^ __output__ tensorScatterUpdate' op'options tensor indices updates | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor, buildInputs indices, buildInputs updates] return (opDef "TensorScatterUpdate" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" type_attr: "T" } input_arg { name: "indices" type_attr: "Tindices" } input_arg { name: "updates" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | tensorSliceDataset :: forall v'1 toutput_types m' . (MonadBuild m', TensorTypes toutput_types) => TensorList (v'1) toutput_types -- ^ __components__ -> m' (Tensor Value Variant) -- ^ __handle__ tensorSliceDataset = tensorSliceDataset' id tensorSliceDataset' :: forall v'1 toutput_types m' . 
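{- Usage sketch, not part of the generated bindings: tensorScatterUpdate is
   the functional (out-of-place) counterpart of the stateful scatter ops.
   This writes 5 at index 1 and 7 at index 3 of a zero vector; fetching the
   result yields [0, 5, 0, 7].  Helper ops are assumed from TensorFlow.Ops.

> updated :: Tensor Build Float
> updated = tensorScatterUpdate
>             (constant (Shape [4]) [0, 0, 0, 0 :: Float])
>             (constant (Shape [2, 1]) [1, 3 :: Int32])   -- indices
>             (vector [5, 7 :: Float])                    -- updates
-}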
(MonadBuild m', TensorTypes toutput_types) => OpParams -> TensorList (v'1) toutput_types -- ^ __components__ -> m' (Tensor Value Variant) -- ^ __handle__ tensorSliceDataset' op'options components | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs components] buildOp [] (opDef "TensorSliceDataset" & opAttr "Toutput_types" .~ fromTensorTypes (Proxy :: Proxy toutput_types) & op'options & opInputs .~ op'inputs) {- input_arg { name: "components" type_list_attr: "Toutput_types" } output_arg { name: "handle" type: DT_VARIANT } attr { name: "Toutput_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | tensorStridedSliceUpdate :: forall v'1 v'2 v'3 v'4 v'5 t index . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] index) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 index -- ^ __begin__ -> Tensor v'3 index -- ^ __end__ -> Tensor v'4 index -- ^ __strides__ -> Tensor v'5 t -- ^ __value__ -> Tensor Build t -- ^ __output__ tensorStridedSliceUpdate = tensorStridedSliceUpdate' id tensorStridedSliceUpdate' :: forall v'1 v'2 v'3 v'4 v'5 t index . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] index) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 index -- ^ __begin__ -> Tensor v'3 index -- ^ __end__ -> Tensor v'4 index -- ^ __strides__ -> Tensor v'5 t -- ^ __value__ -> Tensor Build t -- ^ __output__ tensorStridedSliceUpdate' op'options input begin end strides value | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs begin, buildInputs end, buildInputs strides, buildInputs value] return (opDef "TensorStridedSliceUpdate" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Index" .~ tensorType (undefined :: index) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "begin" type_attr: "Index" } input_arg { name: "end" type_attr: "Index" } input_arg { name: "strides" type_attr: "Index" } input_arg { name: "value" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Index" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "begin_mask" type: "int" default_value { i: 0 } } attr { name: "end_mask" type: "int" default_value { i: 0 } } attr { name: "ellipsis_mask" type: "int" default_value { i: 0 } } attr { name: "new_axis_mask" type: "int" default_value { i: 0 } } attr { name: "shrink_axis_mask" type: "int" default_value { i: 0 } } -} -- | tensorSummary :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __tensor__ -> Tensor Build Data.ByteString.ByteString -- ^ __summary__ tensorSummary = tensorSummary' id tensorSummary' :: forall v'1 t . 
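{- Usage sketch, not part of the generated bindings: overwrite the slice
   [1:3] of a vector with tensorStridedSliceUpdate.  The mask attrs
   (begin_mask and friends) default to 0 and can be set through the primed
   variant's OpParams.  Expected fetch: [1, 8, 9, 4].

> sliced :: Tensor Build Float
> sliced = tensorStridedSliceUpdate
>            (constant (Shape [4]) [1, 2, 3, 4 :: Float])
>            (vector [1 :: Int32])      -- begin (inclusive)
>            (vector [3 :: Int32])      -- end (exclusive)
>            (vector [1 :: Int32])      -- strides
>            (vector [8, 9 :: Float])   -- value
-}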
(TensorType t) => OpParams -> Tensor v'1 t -- ^ __tensor__ -> Tensor Build Data.ByteString.ByteString -- ^ __summary__ tensorSummary' op'options tensor | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor] return (opDef "TensorSummary" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" type_attr: "T" } output_arg { name: "summary" type: DT_STRING } attr { name: "T" type: "type" } attr { name: "description" type: "string" default_value { s: "" } } attr { name: "labels" type: "list(string)" default_value { list { } } } attr { name: "display_name" type: "string" default_value { s: "" } } -} -- | tensorSummaryV2 :: forall v'1 v'2 v'3 t . (TensorType t) => Tensor v'1 Data.ByteString.ByteString -- ^ __tag__ -> Tensor v'2 t -- ^ __tensor__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __serialized_summary_metadata__ -> Tensor Build Data.ByteString.ByteString -- ^ __summary__ tensorSummaryV2 = tensorSummaryV2' id tensorSummaryV2' :: forall v'1 v'2 v'3 t . (TensorType t) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __tag__ -> Tensor v'2 t -- ^ __tensor__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __serialized_summary_metadata__ -> Tensor Build Data.ByteString.ByteString -- ^ __summary__ tensorSummaryV2' op'options tag tensor serialized_summary_metadata | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tag, buildInputs tensor, buildInputs serialized_summary_metadata] return (opDef "TensorSummaryV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tag" type: DT_STRING } input_arg { name: "tensor" type_attr: "T" } input_arg { name: "serialized_summary_metadata" type: DT_STRING } output_arg { name: "summary" type: DT_STRING } attr { name: "T" type: "type" } -} -- | textLineDataset :: forall v'1 v'2 v'3 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __filenames__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __compression_type__ -> Tensor v'3 Data.Int.Int64 -- ^ __buffer_size__ -> m' (Tensor Value Variant) -- ^ __handle__ textLineDataset = textLineDataset' id textLineDataset' :: forall v'1 v'2 v'3 m' . (MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __filenames__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __compression_type__ -> Tensor v'3 Data.Int.Int64 -- ^ __buffer_size__ -> m' (Tensor Value Variant) -- ^ __handle__ textLineDataset' op'options filenames compression_type buffer_size | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs filenames, buildInputs compression_type, buildInputs buffer_size] buildOp [] (opDef "TextLineDataset" & op'options & opInputs .~ op'inputs) {- input_arg { name: "filenames" type: DT_STRING } input_arg { name: "compression_type" type: DT_STRING } input_arg { name: "buffer_size" type: DT_INT64 } output_arg { name: "handle" type: DT_VARIANT } -} -- | textLineReader :: forall m' . (MonadBuild m') => m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__ textLineReader = textLineReader' id textLineReader' :: forall m' . 
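{- Usage sketch, not part of the generated bindings: textLineDataset builds
   a dataset handle that yields the lines of a file.  An empty
   compression_type means plain text and buffer_size 0 picks the runtime
   default.  String literals as ByteString assume OverloadedStrings;
   'scalar' is assumed from TensorFlow.Ops and 'Session' from
   TensorFlow.Session.

> linesDataset :: Session (Tensor Value Variant)
> linesDataset = textLineDataset (scalar ("input.txt" :: ByteString))
>                                (scalar ("" :: ByteString))
>                                (scalar (0 :: Int64))
-}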
(MonadBuild m') => OpParams -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__ textLineReader' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "TextLineReader" & op'options & opInputs .~ op'inputs) {- output_arg { name: "reader_handle" type: DT_STRING is_ref: true } attr { name: "skip_header_lines" type: "int" default_value { i: 0 } } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | textLineReaderV2 :: forall m' . (MonadBuild m') => m' (Tensor Value ResourceHandle) -- ^ __reader_handle__ textLineReaderV2 = textLineReaderV2' id textLineReaderV2' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Value ResourceHandle) -- ^ __reader_handle__ textLineReaderV2' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "TextLineReaderV2" & op'options & opInputs .~ op'inputs) {- output_arg { name: "reader_handle" type: DT_RESOURCE } attr { name: "skip_header_lines" type: "int" default_value { i: 0 } } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | threadUnsafeUnigramCandidateSampler :: forall v'1 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_sampled__ -> Data.Int.Int64 -- ^ __num_true__ -> Data.Int.Int64 -- ^ __range_max__ -> Bool -- ^ __unique__ -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__ -> m' ((Tensor Value Data.Int.Int64, Tensor Value Float, Tensor Value Float)) -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__) -- -- * __sampled_candidates__ -- -- * __true_expected_count__ -- -- * __sampled_expected_count__ threadUnsafeUnigramCandidateSampler = threadUnsafeUnigramCandidateSampler' id threadUnsafeUnigramCandidateSampler' :: forall v'1 m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_sampled__ -> Data.Int.Int64 -- ^ __num_true__ -> Data.Int.Int64 -- ^ __range_max__ -> Bool -- ^ __unique__ -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__ -> m' ((Tensor Value Data.Int.Int64, Tensor Value Float, Tensor Value Float)) -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__) -- -- * __sampled_candidates__ -- -- * __true_expected_count__ -- -- * __sampled_expected_count__ threadUnsafeUnigramCandidateSampler' op'options num_sampled num_true range_max unique true_classes | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs true_classes] buildOp [] (opDef "ThreadUnsafeUnigramCandidateSampler" & opAttr "num_sampled" .~ num_sampled & opAttr "num_true" .~ num_true & opAttr "range_max" .~ range_max & opAttr "unique" .~ unique & op'options & opInputs .~ op'inputs) {- input_arg { name: "true_classes" type: DT_INT64 } output_arg { name: "sampled_candidates" type: DT_INT64 } output_arg { name: "true_expected_count" type: DT_FLOAT } output_arg { name: "sampled_expected_count" type: DT_FLOAT } attr { name: "num_true" type: "int" has_minimum: true minimum: 1 } attr { name: "num_sampled" type: "int" has_minimum: true minimum: 1 } attr { name: "unique" type: "bool" } attr { name: "range_max" type: "int" has_minimum: true minimum: 1 } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } -} -- | tile :: forall v'1 v'2 t tmultiples . 
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tmultiples) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 tmultiples -- ^ __multiples__ -> Tensor Build t -- ^ __output__ tile = tile' id tile' :: forall v'1 v'2 t tmultiples . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tmultiples) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 tmultiples -- ^ __multiples__ -> Tensor Build t -- ^ __output__ tile' op'options input multiples | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs multiples] return (opDef "Tile" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tmultiples" .~ tensorType (undefined :: tmultiples) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "multiples" type_attr: "Tmultiples" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tmultiples" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | tileGrad :: forall v'1 v'2 t . (TensorType t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __multiples__ -> Tensor Build t -- ^ __output__ tileGrad = tileGrad' id tileGrad' :: forall v'1 v'2 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __multiples__ -> Tensor Build t -- ^ __output__ tileGrad' op'options input multiples | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs multiples] return (opDef "TileGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "multiples" type: DT_INT32 } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } -} -- | timestamp :: forall m' . (MonadBuild m') => m' (Tensor Value Double) -- ^ __ts__ timestamp = timestamp' id timestamp' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Value Double) -- ^ __ts__ timestamp' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "Timestamp" & op'options & opInputs .~ op'inputs) {- output_arg { name: "ts" type: DT_DOUBLE } -} -- | topK :: forall v'1 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Data.Int.Int64 -- ^ __k__ -> Tensor v'1 t -- ^ __input__ -> (Tensor Build t, Tensor Build Data.Int.Int32) -- ^ (__values__, __indices__) -- -- * __values__ -- -- * __indices__ topK = topK' id topK' :: forall v'1 t . 
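{- Usage sketch, not part of the generated bindings: tile repeats a vector
   three times along its only axis; fetching yields [1, 2, 1, 2, 1, 2].
   'Tmultiples' is inferred from the Int32 multiples tensor; 'vector' is
   assumed from TensorFlow.Ops.

> tiled :: Tensor Build Float
> tiled = tile (vector [1, 2 :: Float]) (vector [3 :: Int32])
-}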
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Data.Int.Int64 -- ^ __k__ -> Tensor v'1 t -- ^ __input__ -> (Tensor Build t, Tensor Build Data.Int.Int32) -- ^ (__values__, __indices__) -- -- * __values__ -- -- * __indices__ topK' op'options k input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "TopK" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "k" .~ k & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "values" type_attr: "T" } output_arg { name: "indices" type: DT_INT32 } attr { name: "k" type: "int" has_minimum: true } attr { name: "sorted" type: "bool" default_value { b: true } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | topKV2 :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __k__ -> (Tensor Build t, Tensor Build Data.Int.Int32) -- ^ (__values__, __indices__) -- -- * __values__ -- -- * __indices__ topKV2 = topKV2' id topKV2' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 Data.Int.Int32 -- ^ __k__ -> (Tensor Build t, Tensor Build Data.Int.Int32) -- ^ (__values__, __indices__) -- -- * __values__ -- -- * __indices__ topKV2' op'options input k | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs k] return (opDef "TopKV2" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } input_arg { name: "k" type: DT_INT32 } output_arg { name: "values" type_attr: "T" } output_arg { name: "indices" type: DT_INT32 } attr { name: "sorted" type: "bool" default_value { b: true } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | transpose :: forall v'1 v'2 t tperm . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tperm) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 tperm -- ^ __perm__ -> Tensor Build t -- ^ __y__ transpose = transpose' id transpose' :: forall v'1 v'2 t tperm . 
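{- Usage sketch, not part of the generated bindings: take the two largest
   entries with topKV2.  The op's two outputs come back as a Haskell pair,
   so a single 'run' fetches both; expected result: ([9, 4], [2, 0]).
   'run', 'Session' and 'Vector' are assumed as in the sketches above.

> top2 :: Session (Vector Float, Vector Int32)
> top2 = run (topKV2 (vector [4, 1, 9 :: Float]) (scalar (2 :: Int32)))
-}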
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tperm) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 tperm -- ^ __perm__ -> Tensor Build t -- ^ __y__ transpose' op'options x perm | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs perm] return (opDef "Transpose" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tperm" .~ tensorType (undefined :: tperm) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "perm" type_attr: "Tperm" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tperm" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | tridiagonalMatMul :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Double, Float] t) => Tensor v'1 t -- ^ __superdiag__ -> Tensor v'2 t -- ^ __maindiag__ -> Tensor v'3 t -- ^ __subdiag__ -> Tensor v'4 t -- ^ __rhs__ -> Tensor Build t -- ^ __output__ tridiagonalMatMul = tridiagonalMatMul' id tridiagonalMatMul' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __superdiag__ -> Tensor v'2 t -- ^ __maindiag__ -> Tensor v'3 t -- ^ __subdiag__ -> Tensor v'4 t -- ^ __rhs__ -> Tensor Build t -- ^ __output__ tridiagonalMatMul' op'options superdiag maindiag subdiag rhs | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs superdiag, buildInputs maindiag, buildInputs subdiag, buildInputs rhs] return (opDef "TridiagonalMatMul" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "superdiag" type_attr: "T" } input_arg { name: "maindiag" type_attr: "T" } input_arg { name: "subdiag" type_attr: "T" } input_arg { name: "rhs" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | tridiagonalSolve :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Double, Float] t) => Tensor v'1 t -- ^ __diagonals__ -> Tensor v'2 t -- ^ __rhs__ -> Tensor Build t -- ^ __output__ tridiagonalSolve = tridiagonalSolve' id tridiagonalSolve' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __diagonals__ -> Tensor v'2 t -- ^ __rhs__ -> Tensor Build t -- ^ __output__ tridiagonalSolve' op'options diagonals rhs | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs diagonals, buildInputs rhs] return (opDef "TridiagonalSolve" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "diagonals" type_attr: "T" } input_arg { name: "rhs" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "partial_pivoting" type: "bool" default_value { b: true } } attr { name: "T" type: "type" allowed_values { list { type: DT_DOUBLE type: DT_FLOAT type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | truncateDiv :: forall v'1 v'2 t . 
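{- Usage sketch, not part of the generated bindings: transpose swaps the
   axes of a 2x3 matrix via the permutation [1, 0]; the 3x2 result fetches
   row-major as [1, 4, 2, 5, 3, 6].

> transposed :: Tensor Build Float
> transposed = transpose (constant (Shape [2, 3]) [1 .. 6])
>                        (vector [1, 0 :: Int32])
-}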
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ truncateDiv = truncateDiv' id truncateDiv' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ truncateDiv' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "TruncateDiv" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_UINT16 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | truncateMod :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ truncateMod = truncateMod' id truncateMod' :: forall v'1 v'2 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ truncateMod' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "TruncateMod" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE } } } -} -- | truncatedNormal :: forall v'1 dtype t m' . (MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] t) => Tensor v'1 t -- ^ __shape__ -> m' (Tensor Value dtype) -- ^ __output__ truncatedNormal = truncatedNormal' id truncatedNormal' :: forall v'1 dtype t m' . (MonadBuild m', OneOf '[Data.Word.Word16, Double, Float] dtype, OneOf '[Data.Int.Int32, Data.Int.Int64] t) => OpParams -> Tensor v'1 t -- ^ __shape__ -> m' (Tensor Value dtype) -- ^ __output__ truncatedNormal' op'options shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape] buildOp [] (opDef "TruncatedNormal" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "shape" type_attr: "T" } output_arg { name: "output" type_attr: "dtype" } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } attr { name: "dtype" type: "type" allowed_values { list { type: DT_HALF type: DT_BFLOAT16 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | tryRpc :: forall v'1 v'2 v'3 m' . 
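{- Usage sketch, not part of the generated bindings: draw a 2x2 matrix of
   truncated-normal samples.  The result annotation picks dtype = Float,
   and the op's 'seed' attr (default 0) is set through the primed variant
   for reproducibility; 'opAttr' and '.~' are already in scope in this
   module.

> noise :: Session (Tensor Value Float)
> noise = truncatedNormal' (opAttr "seed" .~ (42 :: Int64))
>                          (vector [2, 2 :: Int32])
-}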
(MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __address__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __method__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __request__ -> m' ((Tensor Value Data.ByteString.ByteString, Tensor Value Data.Int.Int32, Tensor Value Data.ByteString.ByteString)) -- ^ (__response__, __status_code__, __status_message__) -- -- * __response__ -- -- * __status_code__ -- -- * __status_message__ tryRpc = tryRpc' id tryRpc' :: forall v'1 v'2 v'3 m' . (MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __address__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __method__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __request__ -> m' ((Tensor Value Data.ByteString.ByteString, Tensor Value Data.Int.Int32, Tensor Value Data.ByteString.ByteString)) -- ^ (__response__, __status_code__, __status_message__) -- -- * __response__ -- -- * __status_code__ -- -- * __status_message__ tryRpc' op'options address method request | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs address, buildInputs method, buildInputs request] buildOp [] (opDef "TryRpc" & op'options & opInputs .~ op'inputs) {- input_arg { name: "address" type: DT_STRING } input_arg { name: "method" type: DT_STRING } input_arg { name: "request" type: DT_STRING } output_arg { name: "response" type: DT_STRING } output_arg { name: "status_code" type: DT_INT32 } output_arg { name: "status_message" type: DT_STRING } attr { name: "protocol" type: "string" default_value { s: "" } } attr { name: "fail_fast" type: "bool" default_value { b: true } } attr { name: "timeout_in_ms" type: "int" default_value { i: 0 } } -} -- | unbatch :: forall v'1 v'2 v'3 t . (TensorType t) => Data.Int.Int64 -- ^ __timeout_micros__ -> Tensor v'1 t -- ^ __batched_tensor__ -> Tensor v'2 Data.Int.Int64 -- ^ __batch_index__ -> Tensor v'3 Data.Int.Int64 -- ^ __id__ -> Tensor Build t -- ^ __unbatched_tensor__ unbatch = unbatch' id unbatch' :: forall v'1 v'2 v'3 t . (TensorType t) => OpParams -> Data.Int.Int64 -- ^ __timeout_micros__ -> Tensor v'1 t -- ^ __batched_tensor__ -> Tensor v'2 Data.Int.Int64 -- ^ __batch_index__ -> Tensor v'3 Data.Int.Int64 -- ^ __id__ -> Tensor Build t -- ^ __unbatched_tensor__ unbatch' op'options timeout_micros batched_tensor batch_index id | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs batched_tensor, buildInputs batch_index, buildInputs id] return (opDef "Unbatch" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "timeout_micros" .~ timeout_micros & op'options & opInputs .~ op'inputs) {- input_arg { name: "batched_tensor" type_attr: "T" } input_arg { name: "batch_index" type: DT_INT64 } input_arg { name: "id" type: DT_INT64 } output_arg { name: "unbatched_tensor" type_attr: "T" } attr { name: "timeout_micros" type: "int" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } attr { name: "T" type: "type" } -} -- | unbatchGrad :: forall v'1 v'2 v'3 v'4 t . (TensorType t) => Tensor v'1 t -- ^ __original_input__ -> Tensor v'2 Data.Int.Int64 -- ^ __batch_index__ -> Tensor v'3 t -- ^ __grad__ -> Tensor v'4 Data.Int.Int64 -- ^ __id__ -> Tensor Build t -- ^ __batched_grad__ unbatchGrad = unbatchGrad' id unbatchGrad' :: forall v'1 v'2 v'3 v'4 t . 
(TensorType t) => OpParams -> Tensor v'1 t -- ^ __original_input__ -> Tensor v'2 Data.Int.Int64 -- ^ __batch_index__ -> Tensor v'3 t -- ^ __grad__ -> Tensor v'4 Data.Int.Int64 -- ^ __id__ -> Tensor Build t -- ^ __batched_grad__ unbatchGrad' op'options original_input batch_index grad id | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs original_input, buildInputs batch_index, buildInputs grad, buildInputs id] return (opDef "UnbatchGrad" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "original_input" type_attr: "T" } input_arg { name: "batch_index" type: DT_INT64 } input_arg { name: "grad" type_attr: "T" } input_arg { name: "id" type: DT_INT64 } output_arg { name: "batched_grad" type_attr: "T" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } attr { name: "T" type: "type" } -} -- | unicodeDecode :: forall v'1 tsplits . (OneOf '[Data.Int.Int32, Data.Int.Int64] tsplits) => Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> (Tensor Build tsplits, Tensor Build Data.Int.Int32) -- ^ (__row_splits__, __char_values__) -- -- * __row_splits__ -- -- * __char_values__ unicodeDecode = unicodeDecode' id unicodeDecode' :: forall v'1 tsplits . (OneOf '[Data.Int.Int32, Data.Int.Int64] tsplits) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> (Tensor Build tsplits, Tensor Build Data.Int.Int32) -- ^ (__row_splits__, __char_values__) -- -- * __row_splits__ -- -- * __char_values__ unicodeDecode' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "UnicodeDecode" & opAttr "Tsplits" .~ tensorType (undefined :: tsplits) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_STRING } output_arg { name: "row_splits" type_attr: "Tsplits" } output_arg { name: "char_values" type: DT_INT32 } attr { name: "input_encoding" type: "string" } attr { name: "errors" type: "string" default_value { s: "replace" } allowed_values { list { s: "strict" s: "replace" s: "ignore" } } } attr { name: "replacement_char" type: "int" default_value { i: 65533 } } attr { name: "replace_control_characters" type: "bool" default_value { b: false } } attr { name: "Tsplits" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | unicodeDecodeWithOffsets :: forall v'1 tsplits . (OneOf '[Data.Int.Int32, Data.Int.Int64] tsplits) => Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> (Tensor Build tsplits, Tensor Build Data.Int.Int32, Tensor Build Data.Int.Int64) -- ^ (__row_splits__, __char_values__, __char_to_byte_starts__) -- -- * __row_splits__ -- -- * __char_values__ -- -- * __char_to_byte_starts__ unicodeDecodeWithOffsets = unicodeDecodeWithOffsets' id unicodeDecodeWithOffsets' :: forall v'1 tsplits . 
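{- Usage sketch, not part of the generated bindings: unicodeDecode's
   'input_encoding' attr has no default value, so it must be supplied
   through the primed variant's OpParams.  The annotation fixes 'Tsplits'
   at its Int64 default; the outputs are the row splits and one Unicode
   code point per character.

> decoded :: (Tensor Build Int64, Tensor Build Int32)
> decoded = unicodeDecode'
>             (opAttr "input_encoding" .~ ("UTF-8" :: ByteString))
>             (scalar ("hello" :: ByteString))
-}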
(OneOf '[Data.Int.Int32, Data.Int.Int64] tsplits) => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> (Tensor Build tsplits, Tensor Build Data.Int.Int32, Tensor Build Data.Int.Int64) -- ^ (__row_splits__, __char_values__, __char_to_byte_starts__) -- -- * __row_splits__ -- -- * __char_values__ -- -- * __char_to_byte_starts__ unicodeDecodeWithOffsets' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "UnicodeDecodeWithOffsets" & opAttr "Tsplits" .~ tensorType (undefined :: tsplits) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_STRING } output_arg { name: "row_splits" type_attr: "Tsplits" } output_arg { name: "char_values" type: DT_INT32 } output_arg { name: "char_to_byte_starts" type: DT_INT64 } attr { name: "input_encoding" type: "string" } attr { name: "errors" type: "string" default_value { s: "replace" } allowed_values { list { s: "strict" s: "replace" s: "ignore" } } } attr { name: "replacement_char" type: "int" default_value { i: 65533 } } attr { name: "replace_control_characters" type: "bool" default_value { b: false } } attr { name: "Tsplits" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | unicodeEncode :: forall v'1 v'2 tsplits . (OneOf '[Data.Int.Int32, Data.Int.Int64] tsplits) => Tensor v'1 Data.Int.Int32 -- ^ __input_values__ -> Tensor v'2 tsplits -- ^ __input_splits__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ unicodeEncode = unicodeEncode' id unicodeEncode' :: forall v'1 v'2 tsplits . (OneOf '[Data.Int.Int32, Data.Int.Int64] tsplits) => OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __input_values__ -> Tensor v'2 tsplits -- ^ __input_splits__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ unicodeEncode' op'options input_values input_splits | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_values, buildInputs input_splits] return (opDef "UnicodeEncode" & opAttr "Tsplits" .~ tensorType (undefined :: tsplits) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_values" type: DT_INT32 } input_arg { name: "input_splits" type_attr: "Tsplits" } output_arg { name: "output" type: DT_STRING } attr { name: "errors" type: "string" default_value { s: "replace" } allowed_values { list { s: "ignore" s: "replace" s: "strict" } } } attr { name: "output_encoding" type: "string" allowed_values { list { s: "UTF-8" s: "UTF-16-BE" s: "UTF-32-BE" } } } attr { name: "replacement_char" type: "int" default_value { i: 65533 } } attr { name: "Tsplits" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | unicodeScript :: Tensor v'1 Data.Int.Int32 -- ^ __input__ -> Tensor Build Data.Int.Int32 -- ^ __output__ unicodeScript = unicodeScript' id unicodeScript' :: OpParams -> Tensor v'1 Data.Int.Int32 -- ^ __input__ -> Tensor Build Data.Int.Int32 -- ^ __output__ unicodeScript' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "UnicodeScript" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_INT32 } output_arg { name: "output" type: DT_INT32 } -} -- | unicodeTranscode :: Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ unicodeTranscode = unicodeTranscode' id unicodeTranscode' :: 
OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__ -> Tensor Build Data.ByteString.ByteString -- ^ __output__ unicodeTranscode' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "UnicodeTranscode" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_STRING } output_arg { name: "output" type: DT_STRING } attr { name: "input_encoding" type: "string" } attr { name: "output_encoding" type: "string" allowed_values { list { s: "UTF-8" s: "UTF-16-BE" s: "UTF-32-BE" } } } attr { name: "errors" type: "string" default_value { s: "replace" } allowed_values { list { s: "strict" s: "replace" s: "ignore" } } } attr { name: "replacement_char" type: "int" default_value { i: 65533 } } attr { name: "replace_control_characters" type: "bool" default_value { b: false } } -} -- | uniformCandidateSampler :: forall v'1 m' . (MonadBuild m') => Data.Int.Int64 -- ^ __num_sampled__ -> Data.Int.Int64 -- ^ __num_true__ -> Data.Int.Int64 -- ^ __range_max__ -> Bool -- ^ __unique__ -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__ -> m' ((Tensor Value Data.Int.Int64, Tensor Value Float, Tensor Value Float)) -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__) -- -- * __sampled_candidates__ -- -- * __true_expected_count__ -- -- * __sampled_expected_count__ uniformCandidateSampler = uniformCandidateSampler' id uniformCandidateSampler' :: forall v'1 m' . (MonadBuild m') => OpParams -> Data.Int.Int64 -- ^ __num_sampled__ -> Data.Int.Int64 -- ^ __num_true__ -> Data.Int.Int64 -- ^ __range_max__ -> Bool -- ^ __unique__ -> Tensor v'1 Data.Int.Int64 -- ^ __true_classes__ -> m' ((Tensor Value Data.Int.Int64, Tensor Value Float, Tensor Value Float)) -- ^ (__sampled_candidates__, __true_expected_count__, __sampled_expected_count__) -- -- * __sampled_candidates__ -- -- * __true_expected_count__ -- -- * __sampled_expected_count__ uniformCandidateSampler' op'options num_sampled num_true range_max unique true_classes | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs true_classes] buildOp [] (opDef "UniformCandidateSampler" & opAttr "num_sampled" .~ num_sampled & opAttr "num_true" .~ num_true & opAttr "range_max" .~ range_max & opAttr "unique" .~ unique & op'options & opInputs .~ op'inputs) {- input_arg { name: "true_classes" type: DT_INT64 } output_arg { name: "sampled_candidates" type: DT_INT64 } output_arg { name: "true_expected_count" type: DT_FLOAT } output_arg { name: "sampled_expected_count" type: DT_FLOAT } attr { name: "num_true" type: "int" has_minimum: true minimum: 1 } attr { name: "num_sampled" type: "int" has_minimum: true minimum: 1 } attr { name: "unique" type: "bool" } attr { name: "range_max" type: "int" has_minimum: true minimum: 1 } attr { name: "seed" type: "int" default_value { i: 0 } } attr { name: "seed2" type: "int" default_value { i: 0 } } -} -- | unique :: forall v'1 t out_idx . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_idx) => Tensor v'1 t -- ^ __x__ -> (Tensor Build t, Tensor Build out_idx) -- ^ (__y__, __idx__) -- -- * __y__ -- -- * __idx__ unique = unique' id unique' :: forall v'1 t out_idx . 
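{- Usage sketch, not part of the generated bindings: uniformCandidateSampler
   draws 4 candidate classes from [0, 10) without replacement, given one
   true label per row.  Fetching the returned triple in a single 'run'
   (this assumes the three-tuple Fetchable instance) keeps all three
   outputs from one execution of the op.

> sampled :: Session (Vector Int64, Vector Float, Vector Float)
> sampled = run =<< uniformCandidateSampler 4 1 10 True
>                     (constant (Shape [2, 1]) [3, 7 :: Int64])
-}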
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_idx) => OpParams -> Tensor v'1 t -- ^ __x__ -> (Tensor Build t, Tensor Build out_idx) -- ^ (__y__, __idx__) -- -- * __y__ -- -- * __idx__ unique' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "Unique" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "out_idx" .~ tensorType (undefined :: out_idx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } output_arg { name: "idx" type_attr: "out_idx" } attr { name: "T" type: "type" } attr { name: "out_idx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | uniqueV2 :: forall v'1 v'2 t taxis out_idx . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] taxis, OneOf '[Data.Int.Int32, Data.Int.Int64] out_idx) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 taxis -- ^ __axis__ -> (Tensor Build t, Tensor Build out_idx) -- ^ (__y__, __idx__) -- -- * __y__ -- -- * __idx__ uniqueV2 = uniqueV2' id uniqueV2' :: forall v'1 v'2 t taxis out_idx . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] taxis, OneOf '[Data.Int.Int32, Data.Int.Int64] out_idx) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 taxis -- ^ __axis__ -> (Tensor Build t, Tensor Build out_idx) -- ^ (__y__, __idx__) -- -- * __y__ -- -- * __idx__ uniqueV2' op'options x axis | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs axis] return (opDef "UniqueV2" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Taxis" .~ tensorType (undefined :: taxis) & opAttr "out_idx" .~ tensorType (undefined :: out_idx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "axis" type_attr: "Taxis" } output_arg { name: "y" type_attr: "T" } output_arg { name: "idx" type_attr: "out_idx" } attr { name: "T" type: "type" } attr { name: "Taxis" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "out_idx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | uniqueWithCounts :: forall v'1 t out_idx . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_idx) => Tensor v'1 t -- ^ __x__ -> (Tensor Build t, Tensor Build out_idx, Tensor Build out_idx) -- ^ (__y__, __idx__, __count__) -- -- * __y__ -- -- * __idx__ -- -- * __count__ uniqueWithCounts = uniqueWithCounts' id uniqueWithCounts' :: forall v'1 t out_idx . 
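{- Usage sketch, not part of the generated bindings: unique deduplicates a
   vector.  Expected fetch: y = [1, 2, 4] and idx = [0, 0, 1, 2, 1], where
   idx maps each input element to its position in y.  The annotation fixes
   'out_idx' at its Int32 default.

> uniq :: Session (Vector Float, Vector Int32)
> uniq = run (unique (vector [1, 1, 2, 4, 2 :: Float]))
-}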
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_idx) => OpParams -> Tensor v'1 t -- ^ __x__ -> (Tensor Build t, Tensor Build out_idx, Tensor Build out_idx) -- ^ (__y__, __idx__, __count__) -- -- * __y__ -- -- * __idx__ -- -- * __count__ uniqueWithCounts' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "UniqueWithCounts" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "out_idx" .~ tensorType (undefined :: out_idx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } output_arg { name: "idx" type_attr: "out_idx" } output_arg { name: "count" type_attr: "out_idx" } attr { name: "T" type: "type" } attr { name: "out_idx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | uniqueWithCountsV2 :: forall v'1 v'2 t taxis out_idx . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] taxis, OneOf '[Data.Int.Int32, Data.Int.Int64] out_idx) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 taxis -- ^ __axis__ -> (Tensor Build t, Tensor Build out_idx, Tensor Build out_idx) -- ^ (__y__, __idx__, __count__) -- -- * __y__ -- -- * __idx__ -- -- * __count__ uniqueWithCountsV2 = uniqueWithCountsV2' id uniqueWithCountsV2' :: forall v'1 v'2 t taxis out_idx . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] taxis, OneOf '[Data.Int.Int32, Data.Int.Int64] out_idx) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 taxis -- ^ __axis__ -> (Tensor Build t, Tensor Build out_idx, Tensor Build out_idx) -- ^ (__y__, __idx__, __count__) -- -- * __y__ -- -- * __idx__ -- -- * __count__ uniqueWithCountsV2' op'options x axis | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs axis] return (opDef "UniqueWithCountsV2" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Taxis" .~ tensorType (undefined :: taxis) & opAttr "out_idx" .~ tensorType (undefined :: out_idx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "axis" type_attr: "Taxis" } output_arg { name: "y" type_attr: "T" } output_arg { name: "idx" type_attr: "out_idx" } output_arg { name: "count" type_attr: "out_idx" } attr { name: "T" type: "type" } attr { name: "Taxis" type: "type" default_value { type: DT_INT64 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "out_idx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | unpack :: forall v'1 t . (TensorType t) => Data.Int.Int64 -- ^ __num__ -> Tensor v'1 t -- ^ __value__ -> [Tensor Build t] -- ^ __output__ unpack = unpack' id unpack' :: forall v'1 t . (TensorType t) => OpParams -> Data.Int.Int64 -- ^ __num__ -> Tensor v'1 t -- ^ __value__ -> [Tensor Build t] -- ^ __output__ unpack' op'options num value | eqLengthGuard [] = pureOp [num] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value] return (opDef "Unpack" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "num" .~ num & op'options & opInputs .~ op'inputs) {- input_arg { name: "value" type_attr: "T" } output_arg { name: "output" type_attr: "T" number_attr: "num" } attr { name: "num" type: "int" has_minimum: true } attr { name: "T" type: "type" } attr { name: "axis" type: "int" default_value { i: 0 } } -} -- | unravelIndex :: forall v'1 v'2 tidx . 
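{- Usage sketch, not part of the generated bindings: unpack splits a 3x2
   matrix into its three rows (axis 0 is the attr default).  The leading
   Int64 argument is the 'num' attr; it must equal the size of the unpacked
   axis and determines the length of the returned list of tensors.

> rows :: [Tensor Build Float]
> rows = unpack 3 (constant (Shape [3, 2]) [1, 2, 3, 4, 5, 6 :: Float])
-}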
(OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => Tensor v'1 tidx -- ^ __indices__ -> Tensor v'2 tidx -- ^ __dims__ -> Tensor Build tidx -- ^ __output__ unravelIndex = unravelIndex' id unravelIndex' :: forall v'1 v'2 tidx . (OneOf '[Data.Int.Int32, Data.Int.Int64] tidx) => OpParams -> Tensor v'1 tidx -- ^ __indices__ -> Tensor v'2 tidx -- ^ __dims__ -> Tensor Build tidx -- ^ __output__ unravelIndex' op'options indices dims | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs indices, buildInputs dims] return (opDef "UnravelIndex" & opAttr "Tidx" .~ tensorType (undefined :: tidx) & op'options & opInputs .~ op'inputs) {- input_arg { name: "indices" type_attr: "Tidx" } input_arg { name: "dims" type_attr: "Tidx" } output_arg { name: "output" type_attr: "Tidx" } attr { name: "Tidx" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | unsortedSegmentMax :: forall v'1 v'2 v'3 t tindices tnumsegments . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices, OneOf '[Data.Int.Int32, Data.Int.Int64] tnumsegments) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__ -> Tensor v'3 tnumsegments -- ^ __num_segments__ -> Tensor Build t -- ^ __output__ unsortedSegmentMax = unsortedSegmentMax' id unsortedSegmentMax' :: forall v'1 v'2 v'3 t tindices tnumsegments . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices, OneOf '[Data.Int.Int32, Data.Int.Int64] tnumsegments) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__ -> Tensor v'3 tnumsegments -- ^ __num_segments__ -> Tensor Build t -- ^ __output__ unsortedSegmentMax' op'options data' segment_ids num_segments | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs segment_ids, buildInputs num_segments] return (opDef "UnsortedSegmentMax" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & opAttr "Tnumsegments" .~ tensorType (undefined :: tnumsegments) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "segment_ids" type_attr: "Tindices" } input_arg { name: "num_segments" type_attr: "Tnumsegments" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "Tnumsegments" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | unsortedSegmentMin :: forall v'1 v'2 v'3 t tindices tnumsegments . 
(OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices, OneOf '[Data.Int.Int32, Data.Int.Int64] tnumsegments) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__ -> Tensor v'3 tnumsegments -- ^ __num_segments__ -> Tensor Build t -- ^ __output__ unsortedSegmentMin = unsortedSegmentMin' id unsortedSegmentMin' :: forall v'1 v'2 v'3 t tindices tnumsegments . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices, OneOf '[Data.Int.Int32, Data.Int.Int64] tnumsegments) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__ -> Tensor v'3 tnumsegments -- ^ __num_segments__ -> Tensor Build t -- ^ __output__ unsortedSegmentMin' op'options data' segment_ids num_segments | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs segment_ids, buildInputs num_segments] return (opDef "UnsortedSegmentMin" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & opAttr "Tnumsegments" .~ tensorType (undefined :: tnumsegments) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "segment_ids" type_attr: "Tindices" } input_arg { name: "num_segments" type_attr: "Tnumsegments" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "Tnumsegments" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | unsortedSegmentProd :: forall v'1 v'2 v'3 t tindices tnumsegments . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices, OneOf '[Data.Int.Int32, Data.Int.Int64] tnumsegments) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__ -> Tensor v'3 tnumsegments -- ^ __num_segments__ -> Tensor Build t -- ^ __output__ unsortedSegmentProd = unsortedSegmentProd' id unsortedSegmentProd' :: forall v'1 v'2 v'3 t tindices tnumsegments . 
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices, OneOf '[Data.Int.Int32, Data.Int.Int64] tnumsegments) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__ -> Tensor v'3 tnumsegments -- ^ __num_segments__ -> Tensor Build t -- ^ __output__ unsortedSegmentProd' op'options data' segment_ids num_segments | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs segment_ids, buildInputs num_segments] return (opDef "UnsortedSegmentProd" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & opAttr "Tnumsegments" .~ tensorType (undefined :: tnumsegments) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "segment_ids" type_attr: "Tindices" } input_arg { name: "num_segments" type_attr: "Tnumsegments" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "Tnumsegments" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | unsortedSegmentSum :: forall v'1 v'2 v'3 t tindices tnumsegments . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices, OneOf '[Data.Int.Int32, Data.Int.Int64] tnumsegments) => Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__ -> Tensor v'3 tnumsegments -- ^ __num_segments__ -> Tensor Build t -- ^ __output__ unsortedSegmentSum = unsortedSegmentSum' id unsortedSegmentSum' :: forall v'1 v'2 v'3 t tindices tnumsegments . 
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices, OneOf '[Data.Int.Int32, Data.Int.Int64] tnumsegments) => OpParams -> Tensor v'1 t -- ^ __data__ -> Tensor v'2 tindices -- ^ __segment_ids__ -> Tensor v'3 tnumsegments -- ^ __num_segments__ -> Tensor Build t -- ^ __output__ unsortedSegmentSum' op'options data' segment_ids num_segments | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs data', buildInputs segment_ids, buildInputs num_segments] return (opDef "UnsortedSegmentSum" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & opAttr "Tnumsegments" .~ tensorType (undefined :: tnumsegments) & op'options & opInputs .~ op'inputs) {- input_arg { name: "data" type_attr: "T" } input_arg { name: "segment_ids" type_attr: "Tindices" } input_arg { name: "num_segments" type_attr: "Tnumsegments" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "Tnumsegments" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | unstage :: forall dtypes m' . (MonadBuild m', TensorTypes dtypes) => m' (TensorList (Value) dtypes) -- ^ __values__ unstage = unstage' id unstage' :: forall dtypes m' . (MonadBuild m', TensorTypes dtypes) => OpParams -> m' (TensorList (Value) dtypes) -- ^ __values__ unstage' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "Unstage" & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes) & op'options & opInputs .~ op'inputs) {- output_arg { name: "values" type_list_attr: "dtypes" } attr { name: "capacity" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "memory_limit" type: "int" default_value { i: 0 } has_minimum: true } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | unwrapDatasetVariant :: Tensor v'1 Variant -- ^ __input_handle__ -> Tensor Build Variant -- ^ __output_handle__ unwrapDatasetVariant = unwrapDatasetVariant' id unwrapDatasetVariant' :: OpParams -> Tensor v'1 Variant -- ^ __input_handle__ -> Tensor Build Variant -- ^ __output_handle__ unwrapDatasetVariant' op'options input_handle | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_handle] return (opDef "UnwrapDatasetVariant" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_handle" type: DT_VARIANT } output_arg { name: "output_handle" type: DT_VARIANT } -} -- | upperBound :: forall v'1 v'2 t out_type . 
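{- Usage sketch, not part of the generated bindings (the Max, Min and Prod
   variants above have the same shape): unsortedSegmentSum adds each data
   element into the segment named by its id; ids need not be sorted.
   Expected fetch: [5, 7, 0], since segment 2 receives no elements and the
   Sum variant fills empty segments with zero.

> segSum :: Tensor Build Float
> segSum = unsortedSegmentSum (vector [1, 4, 7 :: Float])   -- data
>                             (vector [0, 0, 1 :: Int32])   -- segment_ids
>                             (scalar (3 :: Int32))         -- num_segments
-}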
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_type) => Tensor v'1 t -- ^ __sorted_inputs__ -> Tensor v'2 t -- ^ __values__ -> Tensor Build out_type -- ^ __output__ upperBound = upperBound' id upperBound' :: forall v'1 v'2 t out_type . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] out_type) => OpParams -> Tensor v'1 t -- ^ __sorted_inputs__ -> Tensor v'2 t -- ^ __values__ -> Tensor Build out_type -- ^ __output__ upperBound' op'options sorted_inputs values | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs sorted_inputs, buildInputs values] return (opDef "UpperBound" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "sorted_inputs" type_attr: "T" } input_arg { name: "values" type_attr: "T" } output_arg { name: "output" type_attr: "out_type" } attr { name: "T" type: "type" } attr { name: "out_type" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | varHandleOp :: forall m' . (MonadBuild m') => DataType -- ^ __dtype__ -> Shape -- ^ __shape__ -> m' (Tensor Value ResourceHandle) -- ^ __resource__ varHandleOp = varHandleOp' id varHandleOp' :: forall m' . (MonadBuild m') => OpParams -> DataType -- ^ __dtype__ -> Shape -- ^ __shape__ -> m' (Tensor Value ResourceHandle) -- ^ __resource__ varHandleOp' op'options dtype shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "VarHandleOp" & opAttr "dtype" .~ dtype & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- output_arg { name: "resource" type: DT_RESOURCE } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } attr { name: "dtype" type: "type" } attr { name: "shape" type: "shape" } -} -- | varIsInitializedOp :: forall v'1 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __resource__ -> m' (Tensor Value Bool) -- ^ __is_initialized__ varIsInitializedOp = varIsInitializedOp' id varIsInitializedOp' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __resource__ -> m' (Tensor Value Bool) -- ^ __is_initialized__ varIsInitializedOp' op'options resource | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resource] buildOp [] (opDef "VarIsInitializedOp" & op'options & opInputs .~ op'inputs) {- input_arg { name: "resource" type: DT_RESOURCE } output_arg { name: "is_initialized" type: DT_BOOL } -} -- | variable :: forall dtype m' . (MonadBuild m', TensorType dtype) => Shape -- ^ __shape__ -> m' (Tensor Ref dtype) -- ^ __ref__ variable = variable' id variable' :: forall dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Shape -- ^ __shape__ -> m' (Tensor Ref dtype) -- ^ __ref__ variable' op'options shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "Variable" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- output_arg { name: "ref" type_attr: "dtype" is_ref: true } attr { name: "shape" type: "shape" } attr { name: "dtype" type: "type" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | variableShape :: forall v'1 out_type m' . 
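-- Editorial note (usage sketch; anything not defined in this excerpt is an
-- assumption): `variable`/`variableV2` allocate a mutable ref-typed tensor,
-- while `varHandleOp` creates a resource handle whose initialization can be
-- checked with `varIsInitializedOp`.  A minimal ref-variable sketch in some
-- MonadBuild monad m':
--
-- > do (v :: Tensor Ref Float) <- variableV2 (Shape [2, 2])
-- >    -- v must still be assigned (e.g. with an Assign op defined elsewhere
-- >    -- in this module) before it can be read.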
(MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] out_type) => Tensor v'1 ResourceHandle -- ^ __input__ -> m' (Tensor Value out_type) -- ^ __output__ variableShape = variableShape' id variableShape' :: forall v'1 out_type m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64] out_type) => OpParams -> Tensor v'1 ResourceHandle -- ^ __input__ -> m' (Tensor Value out_type) -- ^ __output__ variableShape' op'options input | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "VariableShape" & opAttr "out_type" .~ tensorType (undefined :: out_type) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type: DT_RESOURCE } output_arg { name: "output" type_attr: "out_type" } attr { name: "out_type" type: "type" default_value { type: DT_INT32 } allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | variableV2 :: forall dtype m' . (MonadBuild m', TensorType dtype) => Shape -- ^ __shape__ -> m' (Tensor Ref dtype) -- ^ __ref__ variableV2 = variableV2' id variableV2' :: forall dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Shape -- ^ __shape__ -> m' (Tensor Ref dtype) -- ^ __ref__ variableV2' op'options shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "VariableV2" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- output_arg { name: "ref" type_attr: "dtype" is_ref: true } attr { name: "shape" type: "shape" } attr { name: "dtype" type: "type" } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | where' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor Build Data.Int.Int64 -- ^ __index__ where' = where'' id where'' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Bool, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build Data.Int.Int64 -- ^ __index__ where'' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "Where" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "index" type: DT_INT64 } attr { name: "T" type: "type" default_value { type: DT_BOOL } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 type: DT_BOOL } } } -} -- | wholeFileReader :: forall m' . (MonadBuild m') => m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__ wholeFileReader = wholeFileReader' id wholeFileReader' :: forall m' . 
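-- Editorial note: `wholeFileReader`/`wholeFileReaderV2` below only allocate a
-- reader handle; the handle is then driven by a filename queue and a
-- ReaderRead op (defined elsewhere in this module), producing one record per
-- file whose key is the filename and whose value is the entire file contents.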
(MonadBuild m') => OpParams -> m' (Tensor Ref Data.ByteString.ByteString) -- ^ __reader_handle__ wholeFileReader' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "WholeFileReader" & op'options & opInputs .~ op'inputs) {- output_arg { name: "reader_handle" type: DT_STRING is_ref: true } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | wholeFileReaderV2 :: forall m' . (MonadBuild m') => m' (Tensor Value ResourceHandle) -- ^ __reader_handle__ wholeFileReaderV2 = wholeFileReaderV2' id wholeFileReaderV2' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Value ResourceHandle) -- ^ __reader_handle__ wholeFileReaderV2' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "WholeFileReaderV2" & op'options & opInputs .~ op'inputs) {- output_arg { name: "reader_handle" type: DT_RESOURCE } attr { name: "container" type: "string" default_value { s: "" } } attr { name: "shared_name" type: "string" default_value { s: "" } } -} -- | windowDataset :: [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __size__ -> Tensor v'3 Data.Int.Int64 -- ^ __shift__ -> Tensor v'4 Data.Int.Int64 -- ^ __stride__ -> Tensor v'5 Bool -- ^ __drop_remainder__ -> Tensor Build Variant -- ^ __handle__ windowDataset = windowDataset' id windowDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> Tensor v'1 Variant -- ^ __input_dataset__ -> Tensor v'2 Data.Int.Int64 -- ^ __size__ -> Tensor v'3 Data.Int.Int64 -- ^ __shift__ -> Tensor v'4 Data.Int.Int64 -- ^ __stride__ -> Tensor v'5 Bool -- ^ __drop_remainder__ -> Tensor Build Variant -- ^ __handle__ windowDataset' op'options output_types input_dataset size shift stride drop_remainder | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_dataset, buildInputs size, buildInputs shift, buildInputs stride, buildInputs drop_remainder] return (opDef "WindowDataset" & opAttr "output_types" .~ output_types & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_dataset" type: DT_VARIANT } input_arg { name: "size" type: DT_INT64 } input_arg { name: "shift" type: DT_INT64 } input_arg { name: "stride" type: DT_INT64 } input_arg { name: "drop_remainder" type: DT_BOOL } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } -} -- | workerHeartbeat :: forall v'1 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __request__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __response__ workerHeartbeat = workerHeartbeat' id workerHeartbeat' :: forall v'1 m' . 
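-- Editorial note (worked example; semantics paraphrased from the OpDef):
-- `windowDataset` above groups the elements of __input_dataset__ into windows
-- of __size__, starting a new window every __shift__ elements and taking
-- every __stride__-th element within a window; __drop_remainder__ discards a
-- final window shorter than __size__.  Over elements 0..5 with size=3,
-- shift=2, stride=1, drop_remainder=true the windows are [0,1,2] and [2,3,4]
-- (the short window starting at 4 is dropped).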
(MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __request__ -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __response__ workerHeartbeat' op'options request | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs request] buildOp [] (opDef "WorkerHeartbeat" & op'options & opInputs .~ op'inputs) {- input_arg { name: "request" type: DT_STRING } output_arg { name: "response" type: DT_STRING } -} -- | wrapDatasetVariant :: Tensor v'1 Variant -- ^ __input_handle__ -> Tensor Build Variant -- ^ __output_handle__ wrapDatasetVariant = wrapDatasetVariant' id wrapDatasetVariant' :: OpParams -> Tensor v'1 Variant -- ^ __input_handle__ -> Tensor Build Variant -- ^ __output_handle__ wrapDatasetVariant' op'options input_handle | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_handle] return (opDef "WrapDatasetVariant" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input_handle" type: DT_VARIANT } output_arg { name: "output_handle" type: DT_VARIANT } -} -- | writeAudioSummary :: forall v'1 v'2 v'3 v'4 v'5 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __writer__ -> Tensor v'2 Data.Int.Int64 -- ^ __step__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __tag__ -> Tensor v'4 Float -- ^ __tensor__ -> Tensor v'5 Float -- ^ __sample_rate__ -> m' (ControlNode) writeAudioSummary = writeAudioSummary' id writeAudioSummary' :: forall v'1 v'2 v'3 v'4 v'5 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __writer__ -> Tensor v'2 Data.Int.Int64 -- ^ __step__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __tag__ -> Tensor v'4 Float -- ^ __tensor__ -> Tensor v'5 Float -- ^ __sample_rate__ -> m' (ControlNode) writeAudioSummary' op'options writer step tag tensor sample_rate | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs writer, buildInputs step, buildInputs tag, buildInputs tensor, buildInputs sample_rate] buildOp [] (opDef "WriteAudioSummary" & op'options & opInputs .~ op'inputs) {- input_arg { name: "writer" type: DT_RESOURCE } input_arg { name: "step" type: DT_INT64 } input_arg { name: "tag" type: DT_STRING } input_arg { name: "tensor" type: DT_FLOAT } input_arg { name: "sample_rate" type: DT_FLOAT } attr { name: "max_outputs" type: "int" default_value { i: 3 } has_minimum: true minimum: 1 } -} -- | writeFile :: forall v'1 v'2 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __filename__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __contents__ -> m' (ControlNode) writeFile = writeFile' id writeFile' :: forall v'1 v'2 m' . (MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __filename__ -> Tensor v'2 Data.ByteString.ByteString -- ^ __contents__ -> m' (ControlNode) writeFile' op'options filename contents | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs filename, buildInputs contents] buildOp [] (opDef "WriteFile" & op'options & opInputs .~ op'inputs) {- input_arg { name: "filename" type: DT_STRING } input_arg { name: "contents" type: DT_STRING } -} -- | writeGraphSummary :: forall v'1 v'2 v'3 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __writer__ -> Tensor v'2 Data.Int.Int64 -- ^ __step__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __tensor__ -> m' (ControlNode) writeGraphSummary = writeGraphSummary' id writeGraphSummary' :: forall v'1 v'2 v'3 m' . 
(MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __writer__ -> Tensor v'2 Data.Int.Int64 -- ^ __step__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __tensor__ -> m' (ControlNode) writeGraphSummary' op'options writer step tensor | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs writer, buildInputs step, buildInputs tensor] buildOp [] (opDef "WriteGraphSummary" & op'options & opInputs .~ op'inputs) {- input_arg { name: "writer" type: DT_RESOURCE } input_arg { name: "step" type: DT_INT64 } input_arg { name: "tensor" type: DT_STRING } -} -- | writeHistogramSummary :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __writer__ -> Tensor v'2 Data.Int.Int64 -- ^ __step__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __tag__ -> Tensor v'4 t -- ^ __values__ -> m' (ControlNode) writeHistogramSummary = writeHistogramSummary' id writeHistogramSummary' :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __writer__ -> Tensor v'2 Data.Int.Int64 -- ^ __step__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __tag__ -> Tensor v'4 t -- ^ __values__ -> m' (ControlNode) writeHistogramSummary' op'options writer step tag values | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs writer, buildInputs step, buildInputs tag, buildInputs values] buildOp [] (opDef "WriteHistogramSummary" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "writer" type: DT_RESOURCE } input_arg { name: "step" type: DT_INT64 } input_arg { name: "tag" type: DT_STRING } input_arg { name: "values" type_attr: "T" } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | writeImageSummary :: forall v'1 v'2 v'3 v'4 v'5 t m' . (MonadBuild m', OneOf '[Data.Word.Word16, Data.Word.Word8, Float] t) => Tensor v'1 ResourceHandle -- ^ __writer__ -> Tensor v'2 Data.Int.Int64 -- ^ __step__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __tag__ -> Tensor v'4 t -- ^ __tensor__ -> Tensor v'5 Data.Word.Word8 -- ^ __bad_color__ -> m' (ControlNode) writeImageSummary = writeImageSummary' id writeImageSummary' :: forall v'1 v'2 v'3 v'4 v'5 t m' . 
(MonadBuild m', OneOf '[Data.Word.Word16, Data.Word.Word8, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __writer__ -> Tensor v'2 Data.Int.Int64 -- ^ __step__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __tag__ -> Tensor v'4 t -- ^ __tensor__ -> Tensor v'5 Data.Word.Word8 -- ^ __bad_color__ -> m' (ControlNode) writeImageSummary' op'options writer step tag tensor bad_color | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs writer, buildInputs step, buildInputs tag, buildInputs tensor, buildInputs bad_color] buildOp [] (opDef "WriteImageSummary" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "writer" type: DT_RESOURCE } input_arg { name: "step" type: DT_INT64 } input_arg { name: "tag" type: DT_STRING } input_arg { name: "tensor" type_attr: "T" } input_arg { name: "bad_color" type: DT_UINT8 } attr { name: "max_images" type: "int" default_value { i: 3 } has_minimum: true minimum: 1 } attr { name: "T" type: "type" default_value { type: DT_FLOAT } allowed_values { list { type: DT_UINT8 type: DT_FLOAT type: DT_HALF } } } -} -- | writeRawProtoSummary :: forall v'1 v'2 v'3 m' . (MonadBuild m') => Tensor v'1 ResourceHandle -- ^ __writer__ -> Tensor v'2 Data.Int.Int64 -- ^ __step__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __tensor__ -> m' (ControlNode) writeRawProtoSummary = writeRawProtoSummary' id writeRawProtoSummary' :: forall v'1 v'2 v'3 m' . (MonadBuild m') => OpParams -> Tensor v'1 ResourceHandle -- ^ __writer__ -> Tensor v'2 Data.Int.Int64 -- ^ __step__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __tensor__ -> m' (ControlNode) writeRawProtoSummary' op'options writer step tensor | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs writer, buildInputs step, buildInputs tensor] buildOp [] (opDef "WriteRawProtoSummary" & op'options & opInputs .~ op'inputs) {- input_arg { name: "writer" type: DT_RESOURCE } input_arg { name: "step" type: DT_INT64 } input_arg { name: "tensor" type: DT_STRING } -} -- | writeScalarSummary :: forall v'1 v'2 v'3 v'4 t m' . (MonadBuild m', OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 ResourceHandle -- ^ __writer__ -> Tensor v'2 Data.Int.Int64 -- ^ __step__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __tag__ -> Tensor v'4 t -- ^ __value__ -> m' (ControlNode) writeScalarSummary = writeScalarSummary' id writeScalarSummary' :: forall v'1 v'2 v'3 v'4 t m' . 
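-- Editorial note: unlike the Summary ops that return a serialized proto
-- tensor, the Write*Summary family above appends events directly to a
-- summary-writer resource (__writer__) at a global __step__.  A hypothetical
-- call shape, with the writer obtained elsewhere:
--
-- > writeScalarSummary writer step tag value  -- :: m' ControlNode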
(MonadBuild m', OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __writer__ -> Tensor v'2 Data.Int.Int64 -- ^ __step__ -> Tensor v'3 Data.ByteString.ByteString -- ^ __tag__ -> Tensor v'4 t -- ^ __value__ -> m' (ControlNode) writeScalarSummary' op'options writer step tag value | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs writer, buildInputs step, buildInputs tag, buildInputs value] buildOp [] (opDef "WriteScalarSummary" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "writer" type: DT_RESOURCE } input_arg { name: "step" type: DT_INT64 } input_arg { name: "tag" type: DT_STRING } input_arg { name: "value" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | writeSummary :: forall v'1 v'2 v'3 v'4 v'5 t m' . (MonadBuild m', TensorType t) => Tensor v'1 ResourceHandle -- ^ __writer__ -> Tensor v'2 Data.Int.Int64 -- ^ __step__ -> Tensor v'3 t -- ^ __tensor__ -> Tensor v'4 Data.ByteString.ByteString -- ^ __tag__ -> Tensor v'5 Data.ByteString.ByteString -- ^ __summary_metadata__ -> m' (ControlNode) writeSummary = writeSummary' id writeSummary' :: forall v'1 v'2 v'3 v'4 v'5 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 ResourceHandle -- ^ __writer__ -> Tensor v'2 Data.Int.Int64 -- ^ __step__ -> Tensor v'3 t -- ^ __tensor__ -> Tensor v'4 Data.ByteString.ByteString -- ^ __tag__ -> Tensor v'5 Data.ByteString.ByteString -- ^ __summary_metadata__ -> m' (ControlNode) writeSummary' op'options writer step tensor tag summary_metadata | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs writer, buildInputs step, buildInputs tensor, buildInputs tag, buildInputs summary_metadata] buildOp [] (opDef "WriteSummary" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "writer" type: DT_RESOURCE } input_arg { name: "step" type: DT_INT64 } input_arg { name: "tensor" type_attr: "T" } input_arg { name: "tag" type: DT_STRING } input_arg { name: "summary_metadata" type: DT_STRING } attr { name: "T" type: "type" } -} -- | xdivy :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ xdivy = xdivy' id xdivy' :: forall v'1 v'2 t . 
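-- Editorial note (worked example, not part of the generated bindings):
-- `xdivy` computes x / y elementwise, except that it returns 0 whenever
-- x == 0, even if y == 0; this keeps expressions such as masked ratios
-- finite without explicit guards.
--
-- > -- xdivy 0 0 ==> 0   (a plain 0 / 0 would be NaN)
-- > -- xdivy 6 3 ==> 2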
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ xdivy' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "Xdivy" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Helper operator for performing XLA-style broadcasts -- -- Broadcasts `lhs` and `rhs` to the same rank, by adding size 1 dimensions to -- whichever of `lhs` and `rhs` has the lower rank, using XLA's broadcasting rules -- for binary operators. xlaBroadcastHelper :: forall v'1 v'2 v'3 t tindices . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 t -- ^ __lhs__: the LHS input tensor -> Tensor v'2 t -- ^ __rhs__: the RHS input tensor -> Tensor v'3 tindices -- ^ __broadcast_dims__: an XLA-style broadcast dimension specification -> (Tensor Build t, Tensor Build t) -- ^ (__lhs_output__, __rhs_output__) -- -- * __lhs_output__: the broadcasted LHS tensor -- -- * __rhs_output__: the broadcasted RHS tensor xlaBroadcastHelper = xlaBroadcastHelper' id xlaBroadcastHelper' :: forall v'1 v'2 v'3 t tindices . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 t -- ^ __lhs__: the LHS input tensor -> Tensor v'2 t -- ^ __rhs__: the RHS input tensor -> Tensor v'3 tindices -- ^ __broadcast_dims__: an XLA-style broadcast dimension specification -> (Tensor Build t, Tensor Build t) -- ^ (__lhs_output__, __rhs_output__) -- -- * __lhs_output__: the broadcasted LHS tensor -- -- * __rhs_output__: the broadcasted RHS tensor xlaBroadcastHelper' op'options lhs rhs broadcast_dims | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs lhs, buildInputs rhs, buildInputs broadcast_dims] return (opDef "XlaBroadcastHelper" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "lhs" description: "the LHS input tensor" type_attr: "T" } input_arg { name: "rhs" description: "the RHS input tensor" type_attr: "T" } input_arg { name: "broadcast_dims" description: "an XLA-style broadcast dimension specification" type_attr: "Tindices" } output_arg { name: "lhs_output" description: "the broadcasted LHS tensor" type_attr: "T" } output_arg { name: "rhs_output" description: "the broadcasted RHS tensor" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: 
DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Operator that connects the output of an XLA computation to other consumer graph nodes. xlaClusterOutput :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __outputs__ xlaClusterOutput = xlaClusterOutput' id xlaClusterOutput' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor Build t -- ^ __outputs__ xlaClusterOutput' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "XlaClusterOutput" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_attr: "T" } output_arg { name: "outputs" type_attr: "T" } attr { name: "T" type: "type" } -} -- | Wraps the XLA ConvGeneralDilated operator, documented at -- -- https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution -- . xlaConv :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t tindices . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 t -- ^ __lhs__: the input tensor -> Tensor v'2 t -- ^ __rhs__: the kernel tensor -> Tensor v'3 tindices -- ^ __window_strides__: the inter-window strides -> Tensor v'4 tindices -- ^ __padding__: the padding to apply at the start and end of each input dimension -> Tensor v'5 tindices -- ^ __lhs_dilation__: dilation to apply between input elements -> Tensor v'6 tindices -- ^ __rhs_dilation__: dilation to apply between kernel elements -> Tensor v'7 tindices -- ^ __feature_group_count__: number of feature groups for grouped convolution. -> Tensor Build t -- ^ __output__ xlaConv = xlaConv' id xlaConv' :: forall v'1 v'2 v'3 v'4 v'5 v'6 v'7 t tindices . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 t -- ^ __lhs__: the input tensor -> Tensor v'2 t -- ^ __rhs__: the kernel tensor -> Tensor v'3 tindices -- ^ __window_strides__: the inter-window strides -> Tensor v'4 tindices -- ^ __padding__: the padding to apply at the start and end of each input dimension -> Tensor v'5 tindices -- ^ __lhs_dilation__: dilation to apply between input elements -> Tensor v'6 tindices -- ^ __rhs_dilation__: dilation to apply between kernel elements -> Tensor v'7 tindices -- ^ __feature_group_count__: number of feature groups for grouped convolution. 
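-- Editorial note: the "dimension_numbers" and "precision_config" attrs in the
-- OpDef below have no positional argument; like any attr not surfaced in the
-- signature, they can be supplied through the OpParams of the primed variant.
-- A sketch (serializedDimNumbers is a hypothetical serialized proto):
--
-- > xlaConv' (opAttr "dimension_numbers" .~ serializedDimNumbers)
-- >          lhs rhs windowStrides padding lhsDilation rhsDilation fgc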
-> Tensor Build t -- ^ __output__ xlaConv' op'options lhs rhs window_strides padding lhs_dilation rhs_dilation feature_group_count | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs lhs, buildInputs rhs, buildInputs window_strides, buildInputs padding, buildInputs lhs_dilation, buildInputs rhs_dilation, buildInputs feature_group_count] return (opDef "XlaConv" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "lhs" description: "the input tensor" type_attr: "T" } input_arg { name: "rhs" description: "the kernel tensor" type_attr: "T" } input_arg { name: "window_strides" description: "the inter-window strides" type_attr: "Tindices" } input_arg { name: "padding" description: "the padding to apply at the start and end of each input dimension" type_attr: "Tindices" } input_arg { name: "lhs_dilation" description: "dilation to apply between input elements" type_attr: "Tindices" } input_arg { name: "rhs_dilation" description: "dilation to apply between kernel elements" type_attr: "Tindices" } input_arg { name: "feature_group_count" description: "number of feature groups for grouped convolution." type_attr: "Tindices" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } attr { name: "dimension_numbers" type: "string" description: "a serialized xla::ConvolutionDimensionNumbers proto." } attr { name: "precision_config" type: "string" description: "a serialized xla::PrecisionConfig proto." } -} -- | Takes the packed uint32 input and unpacks the input to uint8 to do -- -- Dequantization on device. xlaDequantize :: forall v'1 m' . (MonadBuild m') => Float -- ^ __max_range__: The maximum scalar value possibly produced for the input. -> Float -- ^ __min_range__: The minimum scalar value possibly produced for the input. -> Bool -- ^ __transpose_output__: Boolean to determine if output is transposed. transpose_output -- is faster when input is large and rank of input is higher than 1. -> Tensor v'1 Data.Word.Word32 -- ^ __input__: Input tensor whose type is uint32, shape is [d0, ..., dn]. -> m' (Tensor Value Data.Word.Word16) -- ^ __output__: Output tensor whose type is bfloat16. If transpose_output is true, -- output shape is [dn * 4, dn-1, ..., d1, d0]. If transpose_output -- is false, output shape is [d0,..., dn * 4]. xlaDequantize = xlaDequantize' id xlaDequantize' :: forall v'1 m' . (MonadBuild m') => OpParams -> Float -- ^ __max_range__: The maximum scalar value possibly produced for the input. -> Float -- ^ __min_range__: The minimum scalar value possibly produced for the input. -> Bool -- ^ __transpose_output__: Boolean to determine if output is transposed. transpose_output -- is faster when input is large and rank of input is higher than 1. -> Tensor v'1 Data.Word.Word32 -- ^ __input__: Input tensor whose type is uint32, shape is [d0, ..., dn]. -> m' (Tensor Value Data.Word.Word16) -- ^ __output__: Output tensor whose type is bfloat16. If transpose_output is true, -- output shape is [dn * 4, dn-1, ..., d1, d0]. 
If transpose_output -- is false, output shape is [d0,..., dn * 4]. xlaDequantize' op'options max_range min_range transpose_output input | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "XlaDequantize" & opAttr "max_range" .~ max_range & opAttr "min_range" .~ min_range & opAttr "transpose_output" .~ transpose_output & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "Input tensor whose type is uint32, shape is [d0, ..., dn]." type: DT_UINT32 } output_arg { name: "output" description: "Output tensor whose type is bfloat16. If transpose_output is true,\noutput shape is [dn * 4, dn-1, ..., d1, d0]. If transpose_output\nis false, output shape is [d0,..., dn * 4]." type: DT_BFLOAT16 } attr { name: "min_range" type: "float" description: "The minimum scalar value possibly produced for the input." } attr { name: "max_range" type: "float" description: "The maximum scalar value possibly produced for the input." } attr { name: "mode" type: "string" description: "String to determine the dequantize mode in {\"MIN_COMBINED\", \"MIN_FIRST\", \"SCALED\"}." } attr { name: "transpose_output" type: "bool" description: "Boolean to determine if output is transposed. transpose_output\nis faster when input is large and rank of input is higher than 1." } -} -- | Wraps the XLA DotGeneral operator, documented at -- -- https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral -- . xlaDot :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __lhs__: the LHS tensor -> Tensor v'2 t -- ^ __rhs__: the RHS tensor -> Tensor Build t -- ^ __output__ xlaDot = xlaDot' id xlaDot' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __lhs__: the LHS tensor -> Tensor v'2 t -- ^ __rhs__: the RHS tensor -> Tensor Build t -- ^ __output__ xlaDot' op'options lhs rhs | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs lhs, buildInputs rhs] return (opDef "XlaDot" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "lhs" description: "the LHS tensor" type_attr: "T" } input_arg { name: "rhs" description: "the RHS tensor" type_attr: "T" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "dimension_numbers" type: "string" description: "a serialized xla::DotDimensionNumbers proto." } attr { name: "precision_config" type: "string" description: "a serialized xla::PrecisionConfig proto." } -} -- | Wraps the XLA DynamicSlice operator, documented at -- -- https://www.tensorflow.org/performance/xla/operation_semantics#dynamicslice -- . -- -- DynamicSlice extracts a sub-array from the input array at dynamic -- start_indices. 
The size of the slice in each dimension is passed in -- size_indices, which specify the end point of exclusive slice intervals in each -- dimension -- [start, start + size). The shape of start_indices must have rank 1, -- with dimension size equal to the rank of operand. xlaDynamicSlice :: forall v'1 v'2 v'3 t tindices . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 t -- ^ __input__: A `Tensor` of type T. -> Tensor v'2 tindices -- ^ __start_indices__: List of N integers containing the starting index of the slice for each -- dimension. -> Tensor v'3 tindices -- ^ __size_indices__: List of N integers containing the slice size for each -- dimension. Each value must be strictly greater than zero, and start + size -- must be less than or equal to the size of the dimension to avoid -- implementation-defined behavior. -> Tensor Build t -- ^ __output__ xlaDynamicSlice = xlaDynamicSlice' id xlaDynamicSlice' :: forall v'1 v'2 v'3 t tindices . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 t -- ^ __input__: A `Tensor` of type T. -> Tensor v'2 tindices -- ^ __start_indices__: List of N integers containing the starting index of the slice for each -- dimension. -> Tensor v'3 tindices -- ^ __size_indices__: List of N integers containing the slice size for each -- dimension. Each value must be strictly greater than zero, and start + size -- must be less than or equal to the size of the dimension to avoid -- implementation-defined behavior. -> Tensor Build t -- ^ __output__ xlaDynamicSlice' op'options input start_indices size_indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs start_indices, buildInputs size_indices] return (opDef "XlaDynamicSlice" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "A `Tensor` of type T." type_attr: "T" } input_arg { name: "start_indices" description: "List of N integers containing the slice size for each\ndimension. Each value must be strictly greater than zero, and start + size\nmust be less than or equal to the size of the dimension to avoid\nimplementation defined behavior." type_attr: "Tindices" } input_arg { name: "size_indices" type_attr: "Tindices" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Wraps the XLA DynamicUpdateSlice operator, documented at -- -- https://www.tensorflow.org/performance/xla/operation_semantics#dynamicupdateslice -- . -- -- XlaDynamicUpdateSlice generates a result which is the value of the `input` -- operand, with a slice update overwritten at `indices`. The shape of `update` -- determines the shape of the sub-array of the result which is updated. The shape -- of indices must be rank == 1, with dimension size equal to the rank of `input`. -- -- Handling of out-of-bounds slice indices is implementation-defined. xlaDynamicUpdateSlice :: forall v'1 v'2 v'3 t tindices . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 t -- ^ __input__: A `Tensor` of type T. -> Tensor v'2 t -- ^ __update__: A `Tensor` of type T. Same rank as `input`. -> Tensor v'3 tindices -- ^ __indices__: A vector of indices into `input`. Must have length equal to the rank of -- `input`. -> Tensor Build t -- ^ __output__: A `Tensor` of type T. xlaDynamicUpdateSlice = xlaDynamicUpdateSlice' id xlaDynamicUpdateSlice' :: forall v'1 v'2 v'3 t tindices . 
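-- Editorial note (worked example of the update-slice semantics described
-- above), for a rank-2 input:
--
-- > -- input   = [[0,0,0],
-- > --            [0,0,0]]
-- > -- update  = [[1,2]]
-- > -- indices = [1, 0]
-- > -- output  = [[0,0,0],
-- > --            [1,2,0]]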
(TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 t -- ^ __input__: A `Tensor` of type T. -> Tensor v'2 t -- ^ __update__: A `Tensor` of type T. Same rank as `input`. -> Tensor v'3 tindices -- ^ __indices__: A vector of indices into `input`. Must have length equal to the rank of -- `input`. -> Tensor Build t -- ^ __output__: A `Tensor` of type T. xlaDynamicUpdateSlice' op'options input update indices | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs update, buildInputs indices] return (opDef "XlaDynamicUpdateSlice" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "A `Tensor` of type T." type_attr: "T" } input_arg { name: "update" description: "A `Tensor` of type T. Same rank as `input`." type_attr: "T" } input_arg { name: "indices" description: "A vector of indices into `input`. Must have length equal to the rank of\n`input`." type_attr: "Tindices" } output_arg { name: "output" description: "A `Tensor` of type T." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | An op which supports a basic einsum with 2 inputs and 1 output. -- -- This op has better TPU performance since it doesn't have explicit reshape and -- transpose operations as tf.einsum does. xlaEinsum :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Float] t) => Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __b__ -> Tensor Build t -- ^ __product__ xlaEinsum = xlaEinsum' id xlaEinsum' :: forall v'1 v'2 t . (OneOf '[Data.Word.Word16, Float] t) => OpParams -> Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __b__ -> Tensor Build t -- ^ __product__ xlaEinsum' op'options a b | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a, buildInputs b] return (opDef "XlaEinsum" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "a" type_attr: "T" } input_arg { name: "b" type_attr: "T" } output_arg { name: "product" type_attr: "T" } attr { name: "equation" type: "string" } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_FLOAT } } } -} -- | Wraps the XLA Sort operator, documented at -- -- https://www.tensorflow.org/performance/xla/operation_semantics#sort -- . -- -- Sorts a tensor. Currently, only sorting in ascending order is supported. xlaKeyValueSort :: forall v'1 v'2 k v . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] k, TensorType v) => Tensor v'1 k -- ^ __keys__: A `Tensor` of type K. -> Tensor v'2 v -- ^ __values__: A `Tensor` of type V. -> (Tensor Build k, Tensor Build v) -- ^ (__sorted_keys__, __sorted_values__) -- -- * __sorted_keys__: A `Tensor` of type K. -- -- * __sorted_values__: A `Tensor` of type V. xlaKeyValueSort = xlaKeyValueSort' id xlaKeyValueSort' :: forall v'1 v'2 k v . (OneOf '[Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] k, TensorType v) => OpParams -> Tensor v'1 k -- ^ __keys__: A `Tensor` of type K. -> Tensor v'2 v -- ^ __values__: A `Tensor` of type V. 
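-- Editorial note (worked example): the keys are sorted in ascending order and
-- the values tensor is permuted in lock-step, so the ordering is decided by
-- the keys alone:
--
-- > -- keys          = [3, 1, 2]
-- > -- values        = [10, 20, 30]
-- > -- sorted_keys   = [1, 2, 3]
-- > -- sorted_values = [20, 30, 10]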
-> (Tensor Build k, Tensor Build v) -- ^ (__sorted_keys__, __sorted_values__) -- -- * __sorted_keys__: A `Tensor` of type K. -- -- * __sorted_values__: A `Tensor` of type V. xlaKeyValueSort' op'options keys values | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs keys, buildInputs values] return (opDef "XlaKeyValueSort" & opAttr "K" .~ tensorType (undefined :: k) & opAttr "V" .~ tensorType (undefined :: v) & op'options & opInputs .~ op'inputs) {- input_arg { name: "keys" description: "A `Tensor` of type K." type_attr: "K" } input_arg { name: "values" description: "A `Tensor` of type V." type_attr: "V" } output_arg { name: "sorted_keys" description: "A `Tensor` of type K." type_attr: "K" } output_arg { name: "sorted_values" description: "A `Tensor` of type V." type_attr: "V" } attr { name: "K" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_INT64 type: DT_BFLOAT16 type: DT_UINT16 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } attr { name: "V" type: "type" } -} -- | Wraps the XLA Pad operator, documented at -- -- https://www.tensorflow.org/performance/xla/operation_semantics#pad -- . xlaPad :: forall v'1 v'2 v'3 v'4 v'5 t tindices . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => Tensor v'1 t -- ^ __input__: A `Tensor` of type T. -> Tensor v'2 t -- ^ __padding_value__: A scalar `Tensor` of type T. -> Tensor v'3 tindices -- ^ __padding_low__: the padding to apply at the start of each input dimension -> Tensor v'4 tindices -- ^ __padding_high__: the padding to apply at the end of each input dimension. -> Tensor v'5 tindices -- ^ __padding_interior__: the padding to apply between each input element. -> Tensor Build t -- ^ __output__: A `Tensor` of type T. xlaPad = xlaPad' id xlaPad' :: forall v'1 v'2 v'3 v'4 v'5 t tindices . (TensorType t, OneOf '[Data.Int.Int32, Data.Int.Int64] tindices) => OpParams -> Tensor v'1 t -- ^ __input__: A `Tensor` of type T. -> Tensor v'2 t -- ^ __padding_value__: A scalar `Tensor` of type T. -> Tensor v'3 tindices -- ^ __padding_low__: the padding to apply at the start of each input dimension -> Tensor v'4 tindices -- ^ __padding_high__: the padding to apply at the end of each input dimension. -> Tensor v'5 tindices -- ^ __padding_interior__: the padding to apply between each input element. -> Tensor Build t -- ^ __output__: A `Tensor` of type T. xlaPad' op'options input padding_value padding_low padding_high padding_interior | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs padding_value, buildInputs padding_low, buildInputs padding_high, buildInputs padding_interior] return (opDef "XlaPad" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "Tindices" .~ tensorType (undefined :: tindices) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "A `Tensor` of type T." type_attr: "T" } input_arg { name: "padding_value" description: "A scalar `Tensor` of type T." type_attr: "T" } input_arg { name: "padding_low" description: "the padding to apply at the start of each input dimension" type_attr: "Tindices" } input_arg { name: "padding_high" description: "the padding to apply at the end of each input dimension." type_attr: "Tindices" } input_arg { name: "padding_interior" description: "the padding to apply between each input element." 
type_attr: "Tindices" } output_arg { name: "output" description: "A `Tensor` of type T." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "Tindices" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 } } } -} -- | Receives the named tensor from another XLA computation. Wraps the XLA Recv -- -- operator documented at -- https://www.tensorflow.org/performance/xla/operation_semantics#recv . xlaRecv :: forall dtype m' . (MonadBuild m', TensorType dtype) => Shape -- ^ __shape__: The shape of the tensor. -> m' (Tensor Value dtype) -- ^ __tensor__: The tensor to receive. xlaRecv = xlaRecv' id xlaRecv' :: forall dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Shape -- ^ __shape__: The shape of the tensor. -> m' (Tensor Value dtype) -- ^ __tensor__: The tensor to receive. xlaRecv' op'options shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "XlaRecv" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- output_arg { name: "tensor" description: "The tensor to receive." type_attr: "dtype" } attr { name: "dtype" type: "type" description: "The type of the tensor." } attr { name: "tensor_name" type: "string" description: "A string key that identifies the channel." } attr { name: "shape" type: "shape" description: "The shape of the tensor." } -} -- | Replica ID. xlaReplicaId :: Tensor Build Data.Int.Int32 -- ^ __id__ xlaReplicaId = xlaReplicaId' id xlaReplicaId' :: OpParams -> Tensor Build Data.Int.Int32 -- ^ __id__ xlaReplicaId' op'options | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] return (opDef "XlaReplicaId" & op'options & opInputs .~ op'inputs) {- output_arg { name: "id" type: DT_INT32 } -} -- | Computes the eigen decomposition of a batch of self-adjoint matrices -- -- (Note: Only real inputs are supported). -- -- Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in -- tensor such that tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i], for -- i=0...N-1. xlaSelfAdjointEig :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Float -- ^ __epsilon__: the tolerance ratio. -> Bool -- ^ __lower__: a boolean specifies whether the calculation is done with the lower -- triangular part or the upper triangular part. -> Data.Int.Int64 -- ^ __max_iter__: maximum number of sweep update, i.e., the whole lower triangular -- part or upper triangular part based on parameter lower. Heuristically, it has -- been argued that approximatly logN sweeps are needed in practice (Ref: Golub & -- van Loan "Matrix Computation"). -> Tensor v'1 t -- ^ __a__: the input tensor. -> (Tensor Build t, Tensor Build t) -- ^ (__w__, __v__) -- -- * __w__: The eigenvalues in ascending order, each repeated according to its -- multiplicity. -- -- * __v__: The column v[..., :, i] is the normalized eigenvector corresponding to the -- eigenvalue w[..., i]. xlaSelfAdjointEig = xlaSelfAdjointEig' id xlaSelfAdjointEig' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Float -- ^ __epsilon__: the tolerance ratio. 
-> Bool -- ^ __lower__: a boolean that specifies whether the calculation is done with the lower -- triangular part or the upper triangular part. -> Data.Int.Int64 -- ^ __max_iter__: maximum number of sweep updates, i.e., the whole lower triangular -- part or upper triangular part based on parameter lower. Heuristically, it has -- been argued that approximately logN sweeps are needed in practice (Ref: Golub & -- van Loan "Matrix Computation"). -> Tensor v'1 t -- ^ __a__: the input tensor. -> (Tensor Build t, Tensor Build t) -- ^ (__w__, __v__) -- -- * __w__: The eigenvalues in ascending order, each repeated according to its -- multiplicity. -- -- * __v__: The column v[..., :, i] is the normalized eigenvector corresponding to the -- eigenvalue w[..., i]. xlaSelfAdjointEig' op'options epsilon lower max_iter a | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a] return (opDef "XlaSelfAdjointEig" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "epsilon" .~ epsilon & opAttr "lower" .~ lower & opAttr "max_iter" .~ max_iter & op'options & opInputs .~ op'inputs) {- input_arg { name: "a" description: "the input tensor." type_attr: "T" } output_arg { name: "w" description: "The eigenvalues in ascending order, each repeated according to its\nmultiplicity." type_attr: "T" } output_arg { name: "v" description: "The column v[..., :, i] is the normalized eigenvector corresponding to the\neigenvalue w[..., i]." type_attr: "T" } attr { name: "lower" type: "bool" description: "a boolean that specifies whether the calculation is done with the lower\ntriangular part or the upper triangular part." } attr { name: "max_iter" type: "int" description: "maximum number of sweep updates, i.e., the whole lower triangular\npart or upper triangular part based on parameter lower. Heuristically, it has\nbeen argued that approximately logN sweeps are needed in practice (Ref: Golub &\nvan Loan \"Matrix Computation\")." } attr { name: "epsilon" type: "float" description: "the tolerance ratio." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | Sends the named tensor to another XLA computation. Wraps the XLA Send operator -- -- documented at -- https://www.tensorflow.org/performance/xla/operation_semantics#send . xlaSend :: forall v'1 t m' . (MonadBuild m', TensorType t) => Tensor v'1 t -- ^ __tensor__: The tensor to send. -> m' (ControlNode) xlaSend = xlaSend' id xlaSend' :: forall v'1 t m' . (MonadBuild m', TensorType t) => OpParams -> Tensor v'1 t -- ^ __tensor__: The tensor to send. -> m' (ControlNode) xlaSend' op'options tensor | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor] buildOp [] (opDef "XlaSend" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" description: "The tensor to send." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "tensor_name" type: "string" description: "A string key that identifies the channel." } -} -- | Wraps the XLA Sort operator, documented at -- -- https://www.tensorflow.org/performance/xla/operation_semantics#sort -- . -- -- Sorts a tensor. Currently, only sorting in ascending order is supported. xlaSort :: forall v'1 t . 
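-- Editorial note (worked example): ascending order is the only direction
-- currently supported, so for a rank-1 input:
--
-- > -- input  = [3, 1, 2]
-- > -- output = [1, 2, 3]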
(TensorType t) => Tensor v'1 t -- ^ __input__: A `Tensor` of type T. -> Tensor Build t -- ^ __output__: A `Tensor` of type T. xlaSort = xlaSort' id xlaSort' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __input__: A `Tensor` of type T. -> Tensor Build t -- ^ __output__: A `Tensor` of type T. xlaSort' op'options input | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "XlaSort" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "A `Tensor` of type T." type_attr: "T" } output_arg { name: "output" description: "A `Tensor` of type T." type_attr: "T" } attr { name: "T" type: "type" } -} -- | Computes the singular value decomposition (SVD) of a batch of matrices -- -- (Note: Only real inputs are supported). -- -- Computes the singular values and singular vectors of the innermost M-by-N matrices in -- tensor such that tensor[...,:,:] = u[..., :, :] * Diag(s[..., :]) * Transpose(v[...,:,:]). xlaSvd :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => Float -- ^ __epsilon__: the tolerance ratio. -> Data.Int.Int64 -- ^ __max_iter__: maximum number of sweep updates, i.e., the whole lower triangular -- part or upper triangular part based on parameter lower. Heuristically, it has -- been argued that approximately log(min (M, N)) sweeps are needed in practice -- (Ref: Golub & van Loan "Matrix Computation"). -> Tensor v'1 t -- ^ __a__: the input tensor. -> (Tensor Build t, Tensor Build t, Tensor Build t) -- ^ (__s__, __u__, __v__) -- -- * __s__: Singular values. The values are sorted in reverse order of magnitude, so -- s[..., 0] is the largest value, s[..., 1] is the second largest, etc. -- -- * __u__: Left singular vectors. -- -- * __v__: Right singular vectors. xlaSvd = xlaSvd' id xlaSvd' :: forall v'1 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word32, Data.Word.Word64, Data.Word.Word8, Double, Float] t) => OpParams -> Float -- ^ __epsilon__: the tolerance ratio. -> Data.Int.Int64 -- ^ __max_iter__: maximum number of sweep updates, i.e., the whole lower triangular -- part or upper triangular part based on parameter lower. Heuristically, it has -- been argued that approximately log(min (M, N)) sweeps are needed in practice -- (Ref: Golub & van Loan "Matrix Computation"). -> Tensor v'1 t -- ^ __a__: the input tensor. -> (Tensor Build t, Tensor Build t, Tensor Build t) -- ^ (__s__, __u__, __v__) -- -- * __s__: Singular values. The values are sorted in reverse order of magnitude, so -- s[..., 0] is the largest value, s[..., 1] is the second largest, etc. -- -- * __u__: Left singular vectors. -- -- * __v__: Right singular vectors. xlaSvd' op'options epsilon max_iter a | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a] return (opDef "XlaSvd" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "epsilon" .~ epsilon & opAttr "max_iter" .~ max_iter & op'options & opInputs .~ op'inputs) {- input_arg { name: "a" description: "the input tensor." type_attr: "T" } output_arg { name: "s" description: "Singular values. 
The values are sorted in reverse order of magnitude, so\ns[..., 0] is the largest value, s[..., 1] is the second largest, etc." type_attr: "T" } output_arg { name: "u" description: "Left singular vectors." type_attr: "T" } output_arg { name: "v" description: "Right singular vectors." type_attr: "T" } attr { name: "max_iter" type: "int" description: "maximum number of sweep updates, i.e., the whole lower triangular\npart or upper triangular part based on parameter lower. Heuristically, it has\nbeen argued that approximately log(min (M, N)) sweeps are needed in practice\n(Ref: Golub & van Loan \"Matrix Computation\")." } attr { name: "epsilon" type: "float" description: "the tolerance ratio." } attr { name: "precision_config" type: "string" description: "a serialized xla::PrecisionConfig proto." } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_UINT8 type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 type: DT_INT64 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 type: DT_BFLOAT16 type: DT_UINT16 type: DT_COMPLEX128 type: DT_HALF type: DT_UINT32 type: DT_UINT64 } } } -} -- | xlogy :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ xlogy = xlogy' id xlogy' :: forall v'1 v'2 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor Build t -- ^ __z__ xlogy' op'options x y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y] return (opDef "Xlogy" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | zerosLike :: forall v'1 t . (TensorType t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ zerosLike = zerosLike' id zerosLike' :: forall v'1 t . (TensorType t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ zerosLike' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "ZerosLike" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" } -} -- | zeta :: forall v'1 v'2 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __q__ -> Tensor Build t -- ^ __z__ zeta = zeta' id zeta' :: forall v'1 v'2 t . 
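-- Editorial note: `zeta` is the Hurwitz zeta function,
-- zeta(x, q) = sum over n >= 0 of (q + n) ** (-x),
-- which reduces to the Riemann zeta function at q = 1.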
(OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __q__ -> Tensor Build t -- ^ __z__ zeta' op'options x q | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs q] return (opDef "Zeta" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "q" type_attr: "T" } output_arg { name: "z" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } -} -- | zipDataset :: [DataType] -- ^ __output_types__ -> [Tensor v'1 Variant] -- ^ __input_datasets__ -> Tensor Build Variant -- ^ __handle__ zipDataset = zipDataset' id zipDataset' :: OpParams -> [DataType] -- ^ __output_types__ -> [Tensor v'1 Variant] -- ^ __input_datasets__ -> Tensor Build Variant -- ^ __handle__ zipDataset' op'options output_types input_datasets | eqLengthGuard [("N", [("input_datasets", length input_datasets)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input_datasets] return (opDef "ZipDataset" & opAttr "output_types" .~ output_types & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length input_datasets) :: Int64 {- input_arg { name: "input_datasets" type: DT_VARIANT number_attr: "N" } output_arg { name: "handle" type: DT_VARIANT } attr { name: "output_types" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "output_shapes" type: "list(shape)" has_minimum: true minimum: 1 } attr { name: "N" type: "int" has_minimum: true minimum: 1 } -} -- | A graph node which represents an argument to a function. _Arg :: forall t m' . (MonadBuild m', TensorType t) => Data.Int.Int64 -- ^ __index__: This argument is the index-th argument of the function. -- -- Attributes for shape inference: -- 1. _output_shapes: this attribute can be set on an _Arg node producing -- non-resource output(s). If set, its value should contain a list of -- TensorShapeProto describing the shape(s) of the tensor(s) this _Arg node will -- produce. If set, _Arg node's shape inference function will use it as the -- node's output shapes. -- 2. _handle_dtypes and _handle_shapes: these attributes can be set on an _Arg -- node producing resource output(s). If set, value of _handle_dtypes should -- contain the dtype(s) of the resource(s) and value of _handle_shapes should -- contain the shape(s) of the resource(s). If both attributes are set, _Arg -- node's shape inference function will use their values as the node's output -- type(s) and shape(s). -> m' (Tensor Value t) -- ^ __output__: The argument. _Arg = _Arg' id _Arg' :: forall t m' . (MonadBuild m', TensorType t) => OpParams -> Data.Int.Int64 -- ^ __index__: This argument is the index-th argument of the function. -- -- Attributes for shape inference: -- 1. _output_shapes: this attribute can be set on an _Arg node producing -- non-resource output(s). If set, its value should contain a list of -- TensorShapeProto describing the shape(s) of the tensor(s) this _Arg node will -- produce. If set, _Arg node's shape inference function will use it as the -- node's output shapes. -- 2. _handle_dtypes and _handle_shapes: these attributes can be set on an _Arg -- node producing resource output(s). If set, value of _handle_dtypes should -- contain the dtype(s) of the resource(s) and value of _handle_shapes should -- contain the shape(s) of the resource(s). 
If both attributes are set, _Arg -- node's shape inference function will use their values as the node's output -- type(s) and shape(s). -> m' (Tensor Value t) -- ^ __output__: The argument. _Arg' op'options index | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "_Arg" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "index" .~ index & op'options & opInputs .~ op'inputs) {- output_arg { name: "output" description: "The argument." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "index" type: "int" description: "This argument is the index-th argument of the function.\n\nAttributes for shape inference:\n1. _output_shapes: this attribute can be set on an _Arg node producing\n non-resource output(s). If set, its value should contain a list of\n TensorShapeProto describing the shape(s) of the tensor(s) this _Arg node will\n produce. If set, _Arg node\'s shape inference function will use it as the\n node\'s output shapes.\n2. _handle_dtypes and _handle_shapes: these attributes can be set on an _Arg\n node producing resource output(s). If set, value of _handle_dtypes should\n contain the dtype(s) of the resource(s) and value of _handle_shapes should\n contain the shape(s) of the resource(s). If both attributes are set, _Arg\n node\'s shape inference function will use their values as the node\'s output\n type(s) and shape(s)." has_minimum: true } -} -- | Converts an array of tensors to a list of tensors. _ArrayToList :: forall v'1 t out_types . (TensorType t, TensorTypes out_types) => [Tensor v'1 t] -- ^ __input__ -> TensorList (Build) out_types -- ^ __output__ _ArrayToList = _ArrayToList' id _ArrayToList' :: forall v'1 t out_types . (TensorType t, TensorTypes out_types) => OpParams -> [Tensor v'1 t] -- ^ __input__ -> TensorList (Build) out_types -- ^ __output__ _ArrayToList' op'options input | eqLengthGuard [("N", [("input", length input)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "_ArrayToList" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "out_types" .~ fromTensorTypes (Proxy :: Proxy out_types) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length input) :: Int64 {- input_arg { name: "input" type_attr: "T" number_attr: "N" } output_arg { name: "output" type_list_attr: "out_types" } attr { name: "T" type: "type" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } attr { name: "out_types" type: "list(type)" has_minimum: true minimum: 1 } -} -- | An op that sets up the centralized structures for a distributed TPU -- -- system. _ConfigureDistributedTPU :: forall v'1 m' . (MonadBuild m') => [Tensor v'1 Data.Int.Int32] -- ^ __inputs__: A scalar tensor for each host indicating how many TPU chips -- there are on the host. -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __output__: A tensor containing a TPUHostConfiguration proto serialized to -- a string, containing the information necessary to initialize the chips -- in a host. _ConfigureDistributedTPU = _ConfigureDistributedTPU' id _ConfigureDistributedTPU' :: forall v'1 m' . (MonadBuild m') => OpParams -> [Tensor v'1 Data.Int.Int32] -- ^ __inputs__: A scalar tensor for each host indicating how many TPU chips -- there are on the host. -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __output__: A tensor containing a TPUHostConfiguration proto serialized to -- a string, containing the information necessary to initialize the chips -- in a host. 
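-- Editor's note (hedged sketch): _ConfigureDistributedTPU is an internal op;
-- each element of __inputs__ is a scalar Int32 tensor giving the number of TPU
-- chips on one host. Assuming 'scalar' from TensorFlow.Ops (tensorflow-ops),
-- configuring a two-host system with four chips per host might look like:
--
-- > configureTwoHosts :: Session (Tensor Value ByteString)
-- > configureTwoHosts = _ConfigureDistributedTPU [scalar 4, scalar 4]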
_ConfigureDistributedTPU' op'options inputs | eqLengthGuard [("N", [("inputs", length inputs)])] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs] buildOp [] (opDef "_ConfigureDistributedTPU" & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length inputs) :: Int64 {- input_arg { name: "inputs" description: "A scalar tensor for each host indicating how many TPU chips\nthere are on the host." type: DT_INT32 number_attr: "N" } output_arg { name: "output" description: "A tensor containing a TPUHostConfiguration proto serialized to\na string, containing the information necessary to initialize the chips\nin a host." type: DT_STRING } attr { name: "N" type: "int" has_minimum: true minimum: 1 } -} -- | A graph node which represents an argument to a function. _DeviceArg :: forall t m' . (MonadBuild m', TensorType t) => Data.Int.Int64 -- ^ __index__: This argument is the index-th argument of the function. -> m' (Tensor Value t) -- ^ __output__: The argument. _DeviceArg = _DeviceArg' id _DeviceArg' :: forall t m' . (MonadBuild m', TensorType t) => OpParams -> Data.Int.Int64 -- ^ __index__: This argument is the index-th argument of the function. -> m' (Tensor Value t) -- ^ __output__: The argument. _DeviceArg' op'options index | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "_DeviceArg" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "index" .~ index & op'options & opInputs .~ op'inputs) {- output_arg { name: "output" description: "The argument." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "index" type: "int" description: "This argument is the index-th argument of the function." has_minimum: true } -} -- | A graph node which represents a return value of a function. _DeviceRetval :: forall v'1 t m' . (MonadBuild m', TensorType t) => Data.Int.Int64 -- ^ __index__: This return value is the index-th return value of the function. -> Tensor v'1 t -- ^ __input__: The return value. -> m' (ControlNode) _DeviceRetval = _DeviceRetval' id _DeviceRetval' :: forall v'1 t m' . (MonadBuild m', TensorType t) => OpParams -> Data.Int.Int64 -- ^ __index__: This return value is the index-th return value of the function. -> Tensor v'1 t -- ^ __input__: The return value. -> m' (ControlNode) _DeviceRetval' op'options index input | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "_DeviceRetval" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "index" .~ index & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "The return value." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "index" type: "int" description: "This return value is the index-th return value of the function." has_minimum: true } -} -- | An op that disconnects the TPUs on a host from a running distributed -- -- TPU system. _DisconnectHostFromDistributedTPUSystem :: forall m' . (MonadBuild m') => m' (Tensor Value Data.Int.Int32) -- ^ __number_of_tpu_chips__: A scalar tensor containing the number of TPU -- chips on the host. _DisconnectHostFromDistributedTPUSystem = _DisconnectHostFromDistributedTPUSystem' id _DisconnectHostFromDistributedTPUSystem' :: forall m' . (MonadBuild m') => OpParams -> m' (Tensor Value Data.Int.Int32) -- ^ __number_of_tpu_chips__: A scalar tensor containing the number of TPU -- chips on the host. 
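-- Editor's note: the distributed-TPU ops in this module form a bring-up /
-- tear-down protocol rather than standalone computations. A hedged sketch of
-- the ordering implied by the op descriptions in this file (the sequencing is
-- inferred from those descriptions, not from an authoritative source):
--
-- > -- 1. _ConfigureDistributedTPU           once, on the TPU_SYSTEM device
-- > -- 2. _InitializeHostForDistributedTPU   on every host
-- > -- 3. _WaitForDistributedTPU             blocks until the system is up
-- > -- 4. _SetGlobalTPUArray                 pushes the topology to each host
-- > --    ... run TPU computations ...
-- > -- 5. _DisconnectHostFromDistributedTPUSystem   on every host
-- > -- 6. _ShutdownDistributedTPU            once, after all hosts disconnect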
_DisconnectHostFromDistributedTPUSystem' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "_DisconnectHostFromDistributedTPUSystem" & op'options & opInputs .~ op'inputs) {- output_arg { name: "number_of_tpu_chips" description: "A scalar tensor containing the number of TPU\nchips on the host." type: DT_INT32 } -} -- | *NOTE*: Do not invoke this operator directly in Python. Grappler is -- -- expected to create these operators. _FusedConv2D :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __filter__ -> [Tensor v'3 t] -- ^ __args__ -> Tensor Build t -- ^ __output__ _FusedConv2D = _FusedConv2D' id _FusedConv2D' :: forall v'1 v'2 v'3 t . (OneOf '[Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __input__ -> Tensor v'2 t -- ^ __filter__ -> [Tensor v'3 t] -- ^ __args__ -> Tensor Build t -- ^ __output__ _FusedConv2D' op'options input filter args | eqLengthGuard [("num_args", [("args", length args)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input, buildInputs filter, buildInputs args] return (opDef "_FusedConv2D" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "num_args" .~ num_args & op'options & opInputs .~ op'inputs) where num_args = fromIntegral (length args) :: Int64 {- input_arg { name: "input" type_attr: "T" } input_arg { name: "filter" type_attr: "T" } input_arg { name: "args" type_attr: "T" number_attr: "num_args" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "num_args" type: "int" has_minimum: true } attr { name: "strides" type: "list(int)" } attr { name: "padding" type: "string" allowed_values { list { s: "SAME" s: "VALID" s: "EXPLICIT" } } } attr { name: "explicit_paddings" type: "list(int)" default_value { list { } } } attr { name: "data_format" type: "string" default_value { s: "NHWC" } allowed_values { list { s: "NHWC" s: "NCHW" } } } attr { name: "dilations" type: "list(int)" default_value { list { i: 1 i: 1 i: 1 i: 1 } } } attr { name: "use_cudnn_on_gpu" type: "bool" default_value { b: true } } attr { name: "fused_ops" type: "list(string)" default_value { list { } } } attr { name: "epsilon" type: "float" default_value { f: 1.0e-4 } } -} -- | *NOTE*: Do not invoke this operator directly in Python. Grappler is -- -- expected to create these operators. _FusedMatMul :: forall v'1 v'2 v'3 t . (OneOf '[Float] t) => Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __b__ -> [Tensor v'3 t] -- ^ __args__ -> Tensor Build t -- ^ __product__ _FusedMatMul = _FusedMatMul' id _FusedMatMul' :: forall v'1 v'2 v'3 t . 
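-- Editor's note (hedged sketch): the _Fused* kernels are configured entirely
-- through attributes ("fused_ops", "strides", "padding", ...), which these
-- bindings expose only via the OpParams argument. The example assumes an
-- Attribute instance for the list types involved is in scope, which may not
-- hold in every version of this package:
--
-- > fusedBiasRelu a b bias =
-- >   _FusedMatMul' (opAttr "fused_ops" .~ (["BiasAdd", "Relu"] :: [ByteString])) a b [bias]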
(OneOf '[Float] t) => OpParams -> Tensor v'1 t -- ^ __a__ -> Tensor v'2 t -- ^ __b__ -> [Tensor v'3 t] -- ^ __args__ -> Tensor Build t -- ^ __product__ _FusedMatMul' op'options a b args | eqLengthGuard [("num_args", [("args", length args)])] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs a, buildInputs b, buildInputs args] return (opDef "_FusedMatMul" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "num_args" .~ num_args & op'options & opInputs .~ op'inputs) where num_args = fromIntegral (length args) :: Int64 {- input_arg { name: "a" type_attr: "T" } input_arg { name: "b" type_attr: "T" } input_arg { name: "args" type_attr: "T" number_attr: "num_args" } output_arg { name: "product" type_attr: "T" } attr { name: "transpose_a" type: "bool" default_value { b: false } } attr { name: "transpose_b" type: "bool" default_value { b: false } } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT } } } attr { name: "num_args" type: "int" has_minimum: true } attr { name: "fused_ops" type: "list(string)" default_value { list { } } } attr { name: "epsilon" type: "float" default_value { f: 1.0e-4 } } -} -- | Cast x of type SrcT to y of DstT. -- -- _HostCast requires its input and produces its output in host memory. _HostCast :: forall v'1 srcT dstT . (TensorType srcT, TensorType dstT) => Tensor v'1 srcT -- ^ __x__ -> Tensor Build dstT -- ^ __y__ _HostCast = _HostCast' id _HostCast' :: forall v'1 srcT dstT . (TensorType srcT, TensorType dstT) => OpParams -> Tensor v'1 srcT -- ^ __x__ -> Tensor Build dstT -- ^ __y__ _HostCast' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "_HostCast" & opAttr "SrcT" .~ tensorType (undefined :: srcT) & opAttr "DstT" .~ tensorType (undefined :: dstT) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "SrcT" } output_arg { name: "y" type_attr: "DstT" } attr { name: "SrcT" type: "type" } attr { name: "DstT" type: "type" } attr { name: "Truncate" type: "bool" default_value { b: false } } -} -- | Receives the named tensor from send_device on recv_device. -- -- _HostRecv produces its output on host memory whereas _Recv produces its -- output on device memory. _HostRecv :: forall tensor_type m' . (MonadBuild m', TensorType tensor_type) => Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device. -> m' (Tensor Value tensor_type) -- ^ __tensor__: The tensor to receive. _HostRecv = _HostRecv' id _HostRecv' :: forall tensor_type m' . (MonadBuild m', TensorType tensor_type) => OpParams -> Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device. -> m' (Tensor Value tensor_type) -- ^ __tensor__: The tensor to receive. _HostRecv' op'options send_device_incarnation | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "_HostRecv" & opAttr "tensor_type" .~ tensorType (undefined :: tensor_type) & opAttr "send_device_incarnation" .~ send_device_incarnation & op'options & opInputs .~ op'inputs) {- output_arg { name: "tensor" description: "The tensor to receive." type_attr: "tensor_type" } attr { name: "tensor_type" type: "type" } attr { name: "tensor_name" type: "string" description: "The name of the tensor to receive." } attr { name: "send_device" type: "string" description: "The name of the device sending the tensor." 
} attr { name: "send_device_incarnation" type: "int" description: "The current incarnation of send_device." } attr { name: "recv_device" type: "string" description: "The name of the device receiving the tensor." } attr { name: "client_terminated" type: "bool" default_value { b: false } description: "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller." } -} -- | Sends the named tensor from send_device to recv_device. -- -- _HostSend requires its input on host memory whereas _Send requires its -- input on device memory. _HostSend :: forall v'1 t m' . (MonadBuild m', TensorType t) => Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device. -> Tensor v'1 t -- ^ __tensor__: The tensor to send. -> m' (ControlNode) _HostSend = _HostSend' id _HostSend' :: forall v'1 t m' . (MonadBuild m', TensorType t) => OpParams -> Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device. -> Tensor v'1 t -- ^ __tensor__: The tensor to send. -> m' (ControlNode) _HostSend' op'options send_device_incarnation tensor | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor] buildOp [] (opDef "_HostSend" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "send_device_incarnation" .~ send_device_incarnation & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" description: "The tensor to send." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "tensor_name" type: "string" description: "The name of the tensor to send." } attr { name: "send_device" type: "string" description: "The name of the device sending the tensor." } attr { name: "send_device_incarnation" type: "int" description: "The current incarnation of send_device." } attr { name: "recv_device" type: "string" description: "The name of the device receiving the tensor." } attr { name: "client_terminated" type: "bool" default_value { b: false } description: "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller." } -} -- | An op that connects each chip on the host to a centralized UberDriver to allow -- -- them to operate as a distributed system with chips in other hosts. _InitializeHostForDistributedTPU :: forall v'1 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __input__: A string containing the address of the UberDriver to connect to. -> m' (Tensor Value Data.Int.Int32) -- ^ __tpu_ids__: A vector containing the global TPU id of each TPU on the host. _InitializeHostForDistributedTPU = _InitializeHostForDistributedTPU' id _InitializeHostForDistributedTPU' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __input__: A string containing the address of the UberDriver to connect to. -> m' (Tensor Value Data.Int.Int32) -- ^ __tpu_ids__: A vector containing the global TPU id of each TPU on the host. 
_InitializeHostForDistributedTPU' op'options input | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "_InitializeHostForDistributedTPU" & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "A string containing the address of the UberDriver to connect to." type: DT_STRING } output_arg { name: "tpu_ids" description: "A vector containing the global TPU id of each TPU on the host." type: DT_INT32 } -} -- | Converts a list of tensors to an array of tensors. _ListToArray :: forall v'1 tin t . (TensorTypes tin, TensorType t) => Data.Int.Int64 -- ^ __N__ -> TensorList (v'1) tin -- ^ __input__ -> [Tensor Build t] -- ^ __output__ _ListToArray = _ListToArray' id _ListToArray' :: forall v'1 tin t . (TensorTypes tin, TensorType t) => OpParams -> Data.Int.Int64 -- ^ __N__ -> TensorList (v'1) tin -- ^ __input__ -> [Tensor Build t] -- ^ __output__ _ListToArray' op'options n input | eqLengthGuard [] = pureOp [n] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] return (opDef "_ListToArray" & opAttr "Tin" .~ fromTensorTypes (Proxy :: Proxy tin) & opAttr "T" .~ tensorType (undefined :: t) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" type_list_attr: "Tin" } output_arg { name: "output" type_attr: "T" number_attr: "N" } attr { name: "Tin" type: "list(type)" has_minimum: true minimum: 1 } attr { name: "T" type: "type" } attr { name: "N" type: "int" has_minimum: true minimum: 1 } -} -- | Returns `x` + `y` element-wise. -- -- *NOTE*: `tf.math.add` supports broadcasting. `tf.math.add_n` does not. More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). _MklAdd :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor v'3 Data.Word.Word8 -- ^ __mkl_x__ -> Tensor v'4 Data.Word.Word8 -- ^ __mkl_y__ -> (Tensor Build t, Tensor Build Data.Word.Word8) -- ^ (__z__, __mkl_z__) -- -- * __z__ -- -- * __mkl_z__ _MklAdd = _MklAdd' id _MklAdd' :: forall v'1 v'2 v'3 v'4 t . 
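-- Editor's note (hedged explanation; the generated docs leave these inputs
-- undocumented): in the _Mkl* binary ops in this group, __mkl_x__ and
-- __mkl_y__ are opaque uint8 tensors carrying MKL-DNN layout metadata for
-- __x__ and __y__, and __mkl_z__ is the corresponding metadata for the result.
-- They are produced and consumed by other MKL-rewritten nodes in the graph and
-- are not normally constructed by hand.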
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.ByteString.ByteString, Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor v'3 Data.Word.Word8 -- ^ __mkl_x__ -> Tensor v'4 Data.Word.Word8 -- ^ __mkl_y__ -> (Tensor Build t, Tensor Build Data.Word.Word8) -- ^ (__z__, __mkl_z__) -- -- * __z__ -- -- * __mkl_z__ _MklAdd' op'options x y mkl_x mkl_y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y, buildInputs mkl_x, buildInputs mkl_y] return (opDef "_MklAdd" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } input_arg { name: "mkl_x" type: DT_UINT8 } input_arg { name: "mkl_y" type: DT_UINT8 } output_arg { name: "z" type_attr: "T" } output_arg { name: "mkl_z" type: DT_UINT8 } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_STRING type: DT_BFLOAT16 } } } -} -- | Returns the max of x and y (i.e. x > y ? x : y) element-wise. -- -- *NOTE*: `Maximum` supports broadcasting. More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) _MklMaximum :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor v'3 Data.Word.Word8 -- ^ __mkl_x__ -> Tensor v'4 Data.Word.Word8 -- ^ __mkl_y__ -> (Tensor Build t, Tensor Build Data.Word.Word8) -- ^ (__z__, __mkl_z__) -- -- * __z__ -- -- * __mkl_z__ _MklMaximum = _MklMaximum' id _MklMaximum' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor v'3 Data.Word.Word8 -- ^ __mkl_x__ -> Tensor v'4 Data.Word.Word8 -- ^ __mkl_y__ -> (Tensor Build t, Tensor Build Data.Word.Word8) -- ^ (__z__, __mkl_z__) -- -- * __z__ -- -- * __mkl_z__ _MklMaximum' op'options x y mkl_x mkl_y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y, buildInputs mkl_x, buildInputs mkl_y] return (opDef "_MklMaximum" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } input_arg { name: "mkl_x" type: DT_UINT8 } input_arg { name: "mkl_y" type: DT_UINT8 } output_arg { name: "z" type_attr: "T" } output_arg { name: "mkl_z" type: DT_UINT8 } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_BFLOAT16 } } } -} -- | Returns x * y element-wise. -- -- *NOTE*: `Mul` supports broadcasting. More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) _MklMul :: forall v'1 v'2 v'3 v'4 t . 
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor v'3 Data.Word.Word8 -- ^ __mkl_x__ -> Tensor v'4 Data.Word.Word8 -- ^ __mkl_y__ -> (Tensor Build t, Tensor Build Data.Word.Word8) -- ^ (__z__, __mkl_z__) -- -- * __z__ -- -- * __mkl_z__ _MklMul = _MklMul' id _MklMul' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int16, Data.Int.Int32, Data.Int.Int64, Data.Int.Int8, Data.Word.Word16, Data.Word.Word8, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor v'3 Data.Word.Word8 -- ^ __mkl_x__ -> Tensor v'4 Data.Word.Word8 -- ^ __mkl_y__ -> (Tensor Build t, Tensor Build Data.Word.Word8) -- ^ (__z__, __mkl_z__) -- -- * __z__ -- -- * __mkl_z__ _MklMul' op'options x y mkl_x mkl_y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y, buildInputs mkl_x, buildInputs mkl_y] return (opDef "_MklMul" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } input_arg { name: "mkl_x" type: DT_UINT8 } input_arg { name: "mkl_y" type: DT_UINT8 } output_arg { name: "z" type_attr: "T" } output_arg { name: "mkl_z" type: DT_UINT8 } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_UINT8 type: DT_INT8 type: DT_UINT16 type: DT_INT16 type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Returns (x - y)(x - y) element-wise. -- -- *NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) _MklSquaredDifference :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor v'3 Data.Word.Word8 -- ^ __mkl_x__ -> Tensor v'4 Data.Word.Word8 -- ^ __mkl_y__ -> (Tensor Build t, Tensor Build Data.Word.Word8) -- ^ (__z__, __mkl_z__) -- -- * __z__ -- -- * __mkl_z__ _MklSquaredDifference = _MklSquaredDifference' id _MklSquaredDifference' :: forall v'1 v'2 v'3 v'4 t . 
(OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor v'3 Data.Word.Word8 -- ^ __mkl_x__ -> Tensor v'4 Data.Word.Word8 -- ^ __mkl_y__ -> (Tensor Build t, Tensor Build Data.Word.Word8) -- ^ (__z__, __mkl_z__) -- -- * __z__ -- -- * __mkl_z__ _MklSquaredDifference' op'options x y mkl_x mkl_y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y, buildInputs mkl_x, buildInputs mkl_y] return (opDef "_MklSquaredDifference" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } input_arg { name: "mkl_x" type: DT_UINT8 } input_arg { name: "mkl_y" type: DT_UINT8 } output_arg { name: "z" type_attr: "T" } output_arg { name: "mkl_z" type: DT_UINT8 } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Returns x - y element-wise. -- -- *NOTE*: `Sub` supports broadcasting. More about broadcasting -- [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) _MklSub :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor v'3 Data.Word.Word8 -- ^ __mkl_x__ -> Tensor v'4 Data.Word.Word8 -- ^ __mkl_y__ -> (Tensor Build t, Tensor Build Data.Word.Word8) -- ^ (__z__, __mkl_z__) -- -- * __z__ -- -- * __mkl_z__ _MklSub = _MklSub' id _MklSub' :: forall v'1 v'2 v'3 v'4 t . (OneOf '[(Data.Complex.Complex Double), (Data.Complex.Complex Float), Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor v'2 t -- ^ __y__ -> Tensor v'3 Data.Word.Word8 -- ^ __mkl_x__ -> Tensor v'4 Data.Word.Word8 -- ^ __mkl_y__ -> (Tensor Build t, Tensor Build Data.Word.Word8) -- ^ (__z__, __mkl_z__) -- -- * __z__ -- -- * __mkl_z__ _MklSub' op'options x y mkl_x mkl_y | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x, buildInputs y, buildInputs mkl_x, buildInputs mkl_y] return (opDef "_MklSub" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T" } input_arg { name: "mkl_x" type: DT_UINT8 } input_arg { name: "mkl_y" type: DT_UINT8 } output_arg { name: "z" type_attr: "T" } output_arg { name: "mkl_z" type: DT_UINT8 } attr { name: "T" type: "type" allowed_values { list { type: DT_BFLOAT16 type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } } } -} -- | Replacement node for NcclBroadcast. -- -- Receives data of shape `shape` from the _NcclBroadcastSend op registered in the -- same `shared_name`. -- The graph should be constructed so that one device runs `_NcclBroadcastSend` and -- `num_devices-1` devices run _NcclBroadcastRecv ops with shared_name value `c`. -- Failure to do so will cause the graph execution to fail to complete. _NcclBroadcastRecv :: forall v'1 t m' .
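-- Editor's note (hedged sketch): the broadcast pair is tied together by an
-- identical "shared_name" attribute, supplied through OpParams (the name
-- "bcast0" below is hypothetical). With num_devices = 2, one device builds the
-- send node and the other builds the matching recv:
--
-- > sendSide input = _NcclBroadcastSend'
-- >   (opAttr "shared_name" .~ ("bcast0" :: ByteString)) 2 input
-- > recvSide shape = _NcclBroadcastRecv'
-- >   (opAttr "shared_name" .~ ("bcast0" :: ByteString)) 2 shape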
(MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Data.Int.Int64 -- ^ __num_devices__: The number of devices participating in this reduction. -> Tensor v'1 Data.Int.Int32 -- ^ __shape__: The shape of the output. -> m' (Tensor Value t) -- ^ __output__: The broadcast data received from the NcclBroadcastSend op. _NcclBroadcastRecv = _NcclBroadcastRecv' id _NcclBroadcastRecv' :: forall v'1 t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Data.Int.Int64 -- ^ __num_devices__: The number of devices participating in this reduction. -> Tensor v'1 Data.Int.Int32 -- ^ __shape__: The shape of the output. -> m' (Tensor Value t) -- ^ __output__: The broadcast data received from the NcclBroadcastSend op. _NcclBroadcastRecv' op'options num_devices shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs shape] buildOp [] (opDef "_NcclBroadcastRecv" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "num_devices" .~ num_devices & op'options & opInputs .~ op'inputs) {- input_arg { name: "shape" description: "The shape of the output." type: DT_INT32 } output_arg { name: "output" description: "The broadcast data received from the NcclBroadcastSend op." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } attr { name: "num_devices" type: "int" description: "The number of devices participating in this reduction." } attr { name: "shared_name" type: "string" description: "Identifier that is shared between ops of the same broadcast." } -} -- | Replacement node for NcclBroadcast. -- -- Sends `input` to the _NcclBroadcastRecv ops registered in the same -- `shared_name`. -- The graph should be constructed so that one device runs `_NcclBroadcastSend` and -- `num_devices-1` devices run _NcclBroadcastRecv ops with shared_name value `c`. -- Failure to do so will cause the graph execution to fail to complete. _NcclBroadcastSend :: forall v'1 t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Data.Int.Int64 -- ^ __num_devices__: The number of devices participating in this reduction. -> Tensor v'1 t -- ^ __input__: The input to the broadcast. -> m' (ControlNode) _NcclBroadcastSend = _NcclBroadcastSend' id _NcclBroadcastSend' :: forall v'1 t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Data.Int.Int64 -- ^ __num_devices__: The number of devices participating in this reduction. -> Tensor v'1 t -- ^ __input__: The input to the broadcast. -> m' (ControlNode) _NcclBroadcastSend' op'options num_devices input | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "_NcclBroadcastSend" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "num_devices" .~ num_devices & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "The input to the broadcast." type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } attr { name: "num_devices" type: "int" description: "The number of devices participating in this reduction." } attr { name: "shared_name" type: "string" description: "Identifier that is shared between ops of the same broadcast." } -} -- | Replacement node for NcclReduce. 
-- -- Reduces 'input' from this op and the NcclReduceSend ops registered in the same -- `shared_name`. -- The graph should be constructed so that 'num_devices-1' devices run -- `_NcclReduceSend` and one device runs _NcclReduceRecv op with shared_name value -- `c`. Failure to do so will cause the graph execution to fail to complete. _NcclReduceRecv :: forall v'1 t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Data.Int.Int64 -- ^ __num_devices__: The number of devices participating in this reduction. -> Tensor v'1 t -- ^ __input__: The input to the reduction. -> m' (Tensor Value t) -- ^ __data__: The reduced data received from this op and the NcclReduceSend op. _NcclReduceRecv = _NcclReduceRecv' id _NcclReduceRecv' :: forall v'1 t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Data.Int.Int64 -- ^ __num_devices__: The number of devices participating in this reduction. -> Tensor v'1 t -- ^ __input__: The input to the reduction. -> m' (Tensor Value t) -- ^ __data__: The reduced data received from this op and the NcclReduceSend op. _NcclReduceRecv' op'options num_devices input | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "_NcclReduceRecv" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "num_devices" .~ num_devices & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "The input to the reduction." type_attr: "T" } output_arg { name: "data" description: "The reduced data received from this op and the NcclReduceSend op." type_attr: "T" } attr { name: "reduction" type: "string" description: "the reduction operation to perform." allowed_values { list { s: "min" s: "max" s: "prod" s: "sum" } } } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } attr { name: "num_devices" type: "int" description: "The number of devices participating in this reduction." } attr { name: "shared_name" type: "string" description: "Identifier that is shared between ops of the same reduce." } -} -- | Replacement node for NcclReduce. -- -- Reduces `input` to the NcclReduceRecv op registered in the same `shared_name`. -- The graph should be constructed so that 'num_devices-1' devices run -- `_NcclReduceSend` and one device runs _NcclReduceRecv op with shared_name value -- `c`. Failure to do so will cause the graph execution to fail to complete. _NcclReduceSend :: forall v'1 t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => Data.Int.Int64 -- ^ __num_devices__: The number of devices participating in this reduction. -> Tensor v'1 t -- ^ __input__: The input to the reduction. -> m' (ControlNode) _NcclReduceSend = _NcclReduceSend' id _NcclReduceSend' :: forall v'1 t m' . (MonadBuild m', OneOf '[Data.Int.Int32, Data.Int.Int64, Data.Word.Word16, Double, Float] t) => OpParams -> Data.Int.Int64 -- ^ __num_devices__: The number of devices participating in this reduction. -> Tensor v'1 t -- ^ __input__: The input to the reduction. 
-> m' (ControlNode) _NcclReduceSend' op'options num_devices input | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "_NcclReduceSend" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "num_devices" .~ num_devices & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "The input to the reduction." type_attr: "T" } attr { name: "reduction" type: "string" description: "the reduction operation to perform." allowed_values { list { s: "min" s: "max" s: "prod" s: "sum" } } } attr { name: "T" type: "type" allowed_values { list { type: DT_HALF type: DT_FLOAT type: DT_DOUBLE type: DT_INT32 type: DT_INT64 } } } attr { name: "num_devices" type: "int" description: "The number of devices participating in this reduction." } attr { name: "shared_name" type: "string" description: "Identifier that is shared between ops of the same reduce." } -} -- | Creates an empty Tensor with shape `shape` and type `dtype`. -- -- The memory can optionally be initialized. This is usually useful in -- conjunction with inplace operations. _ParallelConcatStart :: forall dtype m' . (MonadBuild m', TensorType dtype) => Shape -- ^ __shape__: 1-D `Tensor` indicating the shape of the output. -> m' (Tensor Value dtype) -- ^ __output__: An empty Tensor of the specified type. _ParallelConcatStart = _ParallelConcatStart' id _ParallelConcatStart' :: forall dtype m' . (MonadBuild m', TensorType dtype) => OpParams -> Shape -- ^ __shape__: 1-D `Tensor` indicating the shape of the output. -> m' (Tensor Value dtype) -- ^ __output__: An empty Tensor of the specified type. _ParallelConcatStart' op'options shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "_ParallelConcatStart" & opAttr "dtype" .~ tensorType (undefined :: dtype) & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- output_arg { name: "output" description: "An empty Tensor of the specified type." type_attr: "dtype" } attr { name: "shape" type: "shape" description: "1-D `Tensor` indicating the shape of the output." } attr { name: "dtype" type: "type" description: "The element type of the returned tensor." } -} -- | Updates input `value` at `loc` with `update`. -- -- If you use this function you will almost certainly want to add -- a control dependency as done in the implementation of parallel_stack to -- avoid race conditions. _ParallelConcatUpdate :: forall v'1 v'2 t . (TensorType t) => Data.Int.Int64 -- ^ __loc__: A scalar indicating the index of the first dimension such that -- value[loc, :] is updated. -> Tensor v'1 t -- ^ __value__: A `Tensor` object that will be updated in-place. -> Tensor v'2 t -- ^ __update__: A `Tensor` of rank one less than `value` if `loc` is a scalar, -- otherwise of rank equal to `value` that contains the new values -- for `value`. -> Tensor Build t -- ^ __output__: `value` that has been updated accordingly. _ParallelConcatUpdate = _ParallelConcatUpdate' id _ParallelConcatUpdate' :: forall v'1 v'2 t . (TensorType t) => OpParams -> Data.Int.Int64 -- ^ __loc__: A scalar indicating the index of the first dimension such that -- value[loc, :] is updated. -> Tensor v'1 t -- ^ __value__: A `Tensor` object that will be updated in-place. -> Tensor v'2 t -- ^ __update__: A `Tensor` of rank one less than `value` if `loc` is a scalar, -- otherwise of rank equal to `value` that contains the new values -- for `value`. 
-> Tensor Build t -- ^ __output__: `value` that has been updated accordingly. _ParallelConcatUpdate' op'options loc value update | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs value, buildInputs update] return (opDef "_ParallelConcatUpdate" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "loc" .~ loc & op'options & opInputs .~ op'inputs) {- input_arg { name: "value" description: "A `Tensor` object that will be updated in-place." type_attr: "T" } input_arg { name: "update" description: "A `Tensor` of rank one less than `value` if `loc` is a scalar,\notherwise of rank equal to `value` that contains the new values\nfor `value`." type_attr: "T" } output_arg { name: "output" description: "`value` that has been updated accordingly." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "loc" type: "int" description: "A scalar indicating the index of the first dimension such that\nvalue[loc, :] is updated." } -} -- | _ReadVariablesOp :: forall v'1 dtypes m' . (MonadBuild m', TensorTypes dtypes) => [Tensor v'1 ResourceHandle] -- ^ __resources__ -> m' (TensorList (Value) dtypes) -- ^ __values__ _ReadVariablesOp = _ReadVariablesOp' id _ReadVariablesOp' :: forall v'1 dtypes m' . (MonadBuild m', TensorTypes dtypes) => OpParams -> [Tensor v'1 ResourceHandle] -- ^ __resources__ -> m' (TensorList (Value) dtypes) -- ^ __values__ _ReadVariablesOp' op'options resources | eqLengthGuard [("N", [("resources", length resources)])] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs resources] buildOp [] (opDef "_ReadVariablesOp" & opAttr "dtypes" .~ fromTensorTypes (Proxy :: Proxy dtypes) & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length resources) :: Int64 {- input_arg { name: "resources" type: DT_RESOURCE number_attr: "N" } output_arg { name: "values" type_list_attr: "dtypes" } attr { name: "N" type: "int" has_minimum: true } attr { name: "dtypes" type: "list(type)" has_minimum: true minimum: 1 } -} -- | Receives the named tensor from send_device on recv_device. _Recv :: forall tensor_type m' . (MonadBuild m', TensorType tensor_type) => Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device. -> m' (Tensor Value tensor_type) -- ^ __tensor__: The tensor to receive. _Recv = _Recv' id _Recv' :: forall tensor_type m' . (MonadBuild m', TensorType tensor_type) => OpParams -> Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device. -> m' (Tensor Value tensor_type) -- ^ __tensor__: The tensor to receive. _Recv' op'options send_device_incarnation | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "_Recv" & opAttr "tensor_type" .~ tensorType (undefined :: tensor_type) & opAttr "send_device_incarnation" .~ send_device_incarnation & op'options & opInputs .~ op'inputs) {- output_arg { name: "tensor" description: "The tensor to receive." type_attr: "tensor_type" } attr { name: "tensor_type" type: "type" } attr { name: "tensor_name" type: "string" description: "The name of the tensor to receive." } attr { name: "send_device" type: "string" description: "The name of the device sending the tensor." } attr { name: "send_device_incarnation" type: "int" description: "The current incarnation of send_device." } attr { name: "recv_device" type: "string" description: "The name of the device receiving the tensor." 
} attr { name: "client_terminated" type: "bool" default_value { b: false } description: "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller." } -} -- | A graph node which represents a return value of a function. _Retval :: forall v'1 t m' . (MonadBuild m', TensorType t) => Data.Int.Int64 -- ^ __index__: This return value is the index-th return value of the function. -> Tensor v'1 t -- ^ __input__: The return value. -> m' (ControlNode) _Retval = _Retval' id _Retval' :: forall v'1 t m' . (MonadBuild m', TensorType t) => OpParams -> Data.Int.Int64 -- ^ __index__: This return value is the index-th return value of the function. -> Tensor v'1 t -- ^ __input__: The return value. -> m' (ControlNode) _Retval' op'options index input | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs input] buildOp [] (opDef "_Retval" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "index" .~ index & op'options & opInputs .~ op'inputs) {- input_arg { name: "input" description: "The return value." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "index" type: "int" description: "This return value is the index-th return value of the function." has_minimum: true } -} -- | Allocates a mutable tensor that becomes available to appropriately annotated -- -- downstream Ops as backing store for their output tensor allocations via the -- ScopedAllocatorMgr. -- Returns a reference to this value. -- -- This is an experimental op for internal use only. It is possible to use this -- op in unsafe ways. -- -- 'shapes' is a list of the shapes of the tensors that are to be allocated -- by this ScopedAllocator. -- 'shape' is the shape of the output of this Op, i.e. the 1D backing tensor -- from which the individual allocated tensors are aliased. -- 'sa_name' is the name assigned to the Node, for connectivity specification -- and debugging. -- 'id' is a non-negative integer 'scope_id' handled by the ScopedAllocatorMgr. -- 'expected_call_count' is the number of individual tensors expected to -- be allocated from the backing tensor. _ScopedAllocator :: forall t m' . (MonadBuild m', TensorType t) => Data.Int.Int64 -- ^ __expected_call_count__ -> Data.Int.Int64 -- ^ __id__ -> Shape -- ^ __shape__ -> m' (Tensor Value t) -- ^ __output__ _ScopedAllocator = _ScopedAllocator' id _ScopedAllocator' :: forall t m' . 
(MonadBuild m', TensorType t) => OpParams -> Data.Int.Int64 -- ^ __expected_call_count__ -> Data.Int.Int64 -- ^ __id__ -> Shape -- ^ __shape__ -> m' (Tensor Value t) -- ^ __output__ _ScopedAllocator' op'options expected_call_count id shape | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "_ScopedAllocator" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "expected_call_count" .~ expected_call_count & opAttr "id" .~ id & opAttr "shape" .~ shape & op'options & opInputs .~ op'inputs) {- output_arg { name: "output" type_attr: "T" } attr { name: "shapes" type: "list(shape)" } attr { name: "shape" type: "shape" } attr { name: "T" type: "type" } attr { name: "sa_name" type: "string" } attr { name: "id" type: "int" } attr { name: "expected_call_count" type: "int" } -} -- | Acts like a Concat Op that merges multiple tensors into one; however, it must -- -- only be used in conjunction with a ScopedAllocator that is backing the memory -- of all of its input tensors, so that in effect it just outputs a read-only -- reference to that ScopedAllocator's backing tensor. -- -- This is an experimental op for internal use only. It is possible to use this -- op in unsafe ways. -- -- 'backing' is the backing tensor, i.e. the output of an upstream ScopedAllocator. -- 'inputs' is a list of nominal input tensors, all of which must be aliases -- to regions of the backing tensor. These will be outputs of upstream nodes -- that allocate their outputs from the same ScopedAllocator. -- 'shape' is the shape of the output, which will usually be the same shape as -- the input backing tensor. -- 'reshape' is true iff the output shape is to be different from that of -- the input backing tensor. -- 'sa_name' is the Node name of the upstream ScopedAllocator. -- 'id' is the scope_id identifying the upstream ScopedAllocator. -- 'N' is the number of nominal inputs to be concatenated. _ScopedAllocatorConcat :: forall v'1 v'2 t m' . (MonadBuild m', TensorType t) => Data.Int.Int64 -- ^ __id__ -> Shape -- ^ __shape__ -> Tensor v'1 t -- ^ __backing__ -> [Tensor v'2 t] -- ^ __inputs__ -> m' (Tensor Value t) -- ^ __output__ _ScopedAllocatorConcat = _ScopedAllocatorConcat' id _ScopedAllocatorConcat' :: forall v'1 v'2 t m' . (MonadBuild m', TensorType t) => OpParams -> Data.Int.Int64 -- ^ __id__ -> Shape -- ^ __shape__ -> Tensor v'1 t -- ^ __backing__ -> [Tensor v'2 t] -- ^ __inputs__ -> m' (Tensor Value t) -- ^ __output__ _ScopedAllocatorConcat' op'options id shape backing inputs | eqLengthGuard [("N", [("inputs", length inputs)])] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs backing, buildInputs inputs] buildOp [] (opDef "_ScopedAllocatorConcat" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "id" .~ id & opAttr "shape" .~ shape & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length inputs) :: Int64 {- input_arg { name: "backing" type_attr: "T" } input_arg { name: "inputs" type_attr: "T" number_attr: "N" } output_arg { name: "output" type_attr: "T" } attr { name: "shape" type: "shape" } attr { name: "T" type: "type" } attr { name: "reshape" type: "bool" default_value { b: false } } attr { name: "sa_name" type: "string" } attr { name: "id" type: "int" } attr { name: "N" type: "int" has_minimum: true minimum: 2 } -} -- | Acts roughly like a SplitV Op that splits one tensor into multiple tensors -- -- but must only be used in conjunction with corresponding ScopedAllocator -- and ScopedAllocatorConcat instances. In practice it receives the backing tensor, -- which contains the concatenated values, as its first input and a list of alias -- tensors as its remaining inputs, and it simply outputs that second list. -- -- This is an experimental op for internal use only. It is possible to use this -- op in unsafe ways. -- -- 'concat' is the single output produced by an upstream ScopedAllocatorConcat -- node. This is actually the backing tensor from a ScopedAllocator node -- upstream of the ScopedAllocatorConcat. -- 'split' is a list of tensors aliased from the backing tensor. It will -- become the output of this ScopedAllocatorSplit node. -- 'type' is the common DataType of all of the input and output tensors. -- 'sa_name' is the Node name of the upstream ScopedAllocator. -- 'id' is the scope_id identifying the upstream ScopedAllocator. -- 'N' is the number of split tensors. -- 'shapes' is a list of the split tensor shapes. _ScopedAllocatorSplit :: forall v'1 v'2 t m' . (MonadBuild m', TensorType t) => Data.Int.Int64 -- ^ __id__ -> Tensor v'1 t -- ^ __concat__ -> [Tensor v'2 t] -- ^ __split__ -> m' ([Tensor Value t]) -- ^ __output__ _ScopedAllocatorSplit = _ScopedAllocatorSplit' id _ScopedAllocatorSplit' :: forall v'1 v'2 t m' . (MonadBuild m', TensorType t) => OpParams -> Data.Int.Int64 -- ^ __id__ -> Tensor v'1 t -- ^ __concat__ -> [Tensor v'2 t] -- ^ __split__ -> m' ([Tensor Value t]) -- ^ __output__ _ScopedAllocatorSplit' op'options id concat split | eqLengthGuard [("N", [("split", length split)])] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs concat, buildInputs split] buildOp [n] (opDef "_ScopedAllocatorSplit" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "id" .~ id & opAttr "N" .~ n & op'options & opInputs .~ op'inputs) where n = fromIntegral (length split) :: Int64 {- input_arg { name: "concat" type_attr: "T" } input_arg { name: "split" type_attr: "T" number_attr: "N" } output_arg { name: "output" type_attr: "T" number_attr: "N" } attr { name: "T" type: "type" } attr { name: "sa_name" type: "string" } attr { name: "id" type: "int" } attr { name: "N" type: "int" has_minimum: true minimum: 2 } attr { name: "shapes" type: "list(shape)" } -} -- | Sends the named tensor from send_device to recv_device. _Send :: forall v'1 t m' . (MonadBuild m', TensorType t) => Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device. -> Tensor v'1 t -- ^ __tensor__: The tensor to send. -> m' (ControlNode) _Send = _Send' id _Send' :: forall v'1 t m' . (MonadBuild m', TensorType t) => OpParams -> Data.Int.Int64 -- ^ __send_device_incarnation__: The current incarnation of send_device. -> Tensor v'1 t -- ^ __tensor__: The tensor to send. -> m' (ControlNode) _Send' op'options send_device_incarnation tensor | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs tensor] buildOp [] (opDef "_Send" & opAttr "T" .~ tensorType (undefined :: t) & opAttr "send_device_incarnation" .~ send_device_incarnation & op'options & opInputs .~ op'inputs) {- input_arg { name: "tensor" description: "The tensor to send." type_attr: "T" } attr { name: "T" type: "type" } attr { name: "tensor_name" type: "string" description: "The name of the tensor to send." } attr { name: "send_device" type: "string" description: "The name of the device sending the tensor." } attr { name: "send_device_incarnation" type: "int" description: "The current incarnation of send_device."
} attr { name: "recv_device" type: "string" description: "The name of the device receiving the tensor." } attr { name: "client_terminated" type: "bool" default_value { b: false } description: "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller." } -} -- | An op that informs a host of the global ids of all the of TPUs in the -- -- system. _SetGlobalTPUArray :: forall v'1 m' . (MonadBuild m') => Tensor v'1 Data.ByteString.ByteString -- ^ __topology__: A serialized tensorflow.tpu.TopologyProto that describes the TPU -- topology. -> m' (ControlNode) _SetGlobalTPUArray = _SetGlobalTPUArray' id _SetGlobalTPUArray' :: forall v'1 m' . (MonadBuild m') => OpParams -> Tensor v'1 Data.ByteString.ByteString -- ^ __topology__: A serialized tensorflow.tpu.TopologyProto that describes the TPU -- topology. -> m' (ControlNode) _SetGlobalTPUArray' op'options topology | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs topology] buildOp [] (opDef "_SetGlobalTPUArray" & op'options & opInputs .~ op'inputs) {- input_arg { name: "topology" description: "A serialized tensorflow.tpu.TopologyProto that describes the TPU\ntopology." type: DT_STRING } -} -- | An op that shuts down a running distributed TPU system. The Op returns -- -- an error if no system is running. This Op must be run on the same -- TPU_SYSTEM device as the corresponding _ConfigureDistributedTPU was run -- to start the system, and must be run only after -- _DisconnectHostFromDistributedTPUSystem has completed on every host in -- the system. _ShutdownDistributedTPU :: forall m' . (MonadBuild m') => m' (ControlNode) _ShutdownDistributedTPU = _ShutdownDistributedTPU' id _ShutdownDistributedTPU' :: forall m' . (MonadBuild m') => OpParams -> m' (ControlNode) _ShutdownDistributedTPU' op'options | eqLengthGuard [] = build $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [] buildOp [] (opDef "_ShutdownDistributedTPU" & op'options & opInputs .~ op'inputs) {- -} -- | *NOTE*: Do not invoke this operator directly in Python. Graph rewrite pass is -- -- expected to create these operators. _UnaryOpsComposition :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ _UnaryOpsComposition = _UnaryOpsComposition' id _UnaryOpsComposition' :: forall v'1 t . (OneOf '[Data.Word.Word16, Double, Float] t) => OpParams -> Tensor v'1 t -- ^ __x__ -> Tensor Build t -- ^ __y__ _UnaryOpsComposition' op'options x | eqLengthGuard [] = pureOp [] $ do op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs x] return (opDef "_UnaryOpsComposition" & opAttr "T" .~ tensorType (undefined :: t) & op'options & opInputs .~ op'inputs) {- input_arg { name: "x" type_attr: "T" } output_arg { name: "y" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_FLOAT type: DT_HALF type: DT_DOUBLE } } } attr { name: "op_names" type: "list(string)" } -} -- | _VarHandlesOp :: forall m' . (MonadBuild m') => Data.Int.Int64 -- ^ __N__ -> [DataType] -- ^ __dtypes__ -> m' ([Tensor Value ResourceHandle]) -- ^ __resources__ _VarHandlesOp = _VarHandlesOp' id _VarHandlesOp' :: forall m' . 
-- |
_VarHandlesOp :: forall m' . (MonadBuild m') =>
                 Data.Int.Int64 -- ^ __N__
                 -> [DataType] -- ^ __dtypes__
                 -> m' ([Tensor Value ResourceHandle]) -- ^ __resources__
_VarHandlesOp = _VarHandlesOp' id
_VarHandlesOp' :: forall m' . (MonadBuild m') =>
                  OpParams ->
                  Data.Int.Int64 -- ^ __N__
                  -> [DataType] -- ^ __dtypes__
                  -> m' ([Tensor Value ResourceHandle]) -- ^ __resources__
_VarHandlesOp' op'options n dtypes | eqLengthGuard [] = build $ do
    op'inputs <- fmap Prelude.concat $ Prelude.sequence []
    buildOp [n] (opDef "_VarHandlesOp"
                 & opAttr "N" .~ n
                 & opAttr "dtypes" .~ dtypes
                 & op'options & opInputs .~ op'inputs)
{-
output_arg { name: "resources" type: DT_RESOURCE number_attr: "N" }
attr { name: "containers" type: "list(string)" }
attr { name: "shared_names" type: "list(string)" }
attr { name: "N" type: "int" has_minimum: true }
attr { name: "dtypes" type: "list(type)" }
attr { name: "shapes" type: "list(shape)" }
-}
-- | An op that blocks execution until a distributed TPU system has started
-- up. This op must be run on the same TPU_SYSTEM device as
-- _ConfigureDistributedTPU, and takes as inputs the outputs from the
-- _InitializeHostForDistributedTPU ops.
_WaitForDistributedTPU :: forall v'1 m' . (MonadBuild m') =>
                          [Tensor v'1 Data.Int.Int32] -- ^ __inputs__: For each initialized host, a vector giving the global TPU id of each TPU on the host.
                          -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __topology__: A serialized tensorflow.tpu.TopologyProto that describes the TPU topology.
_WaitForDistributedTPU = _WaitForDistributedTPU' id
_WaitForDistributedTPU' :: forall v'1 m' . (MonadBuild m') =>
                           OpParams ->
                           [Tensor v'1 Data.Int.Int32] -- ^ __inputs__: For each initialized host, a vector giving the global TPU id of each TPU on the host.
                           -> m' (Tensor Value Data.ByteString.ByteString) -- ^ __topology__: A serialized tensorflow.tpu.TopologyProto that describes the TPU topology.
_WaitForDistributedTPU' op'options inputs
  | eqLengthGuard [("N", [("inputs", length inputs)])] = build $ do
    op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs]
    buildOp [] (opDef "_WaitForDistributedTPU"
                & opAttr "N" .~ n
                & op'options & opInputs .~ op'inputs)
  where
    n = fromIntegral (length inputs) :: Int64
{-
input_arg { name: "inputs" description: "For each initialized host, a vector giving the global TPU id\nof each TPU on the host." type: DT_INT32 number_attr: "N" }
output_arg { name: "topology" description: "A serialized tensorflow.tpu.TopologyProto that describes the TPU\ntopology." type: DT_STRING }
attr { name: "startup_timeout_sec" type: "int" default_value { i: 20 } description: "The number of seconds to wait for the TPU system\nto stabilize." }
attr { name: "N" type: "int" has_minimum: true minimum: 1 }
-}
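-- The startup_timeout_sec attribute (default 20) is reachable only through
-- the OpParams hook. A minimal sketch that raises it to 120 seconds;
-- waitWithTimeout is an illustrative name, not part of the generated API.
waitWithTimeout :: MonadBuild m'
                => [Tensor v'1 Data.Int.Int32]
                -> m' (Tensor Value Data.ByteString.ByteString)
waitWithTimeout =
  _WaitForDistributedTPU'
    (opAttr "startup_timeout_sec" .~ (120 :: Data.Int.Int64))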
-- | A placeholder op for multiple values that will be sent to TensorFlow
-- from a running XLA computation.
_XlaRecvAtHost :: forall v'1 toutputs m' . (MonadBuild m',
                                            TensorTypes toutputs) =>
                  Data.Int.Int64 -- ^ __device_ordinal__: The device to use.
                  -> Tensor v'1 Data.ByteString.ByteString -- ^ __dynamic_key__: The key sent at runtime by the compile node to identify which execution the transfer corresponds to.
                  -> m' (TensorList (Value) toutputs) -- ^ __outputs__: A list of tensors that will be received from the XLA computation.
_XlaRecvAtHost = _XlaRecvAtHost' id
_XlaRecvAtHost' :: forall v'1 toutputs m' . (MonadBuild m',
                                             TensorTypes toutputs) =>
                   OpParams ->
                   Data.Int.Int64 -- ^ __device_ordinal__: The device to use.
                   -> Tensor v'1 Data.ByteString.ByteString -- ^ __dynamic_key__: The key sent at runtime by the compile node to identify which execution the transfer corresponds to.
                   -> m' (TensorList (Value) toutputs) -- ^ __outputs__: A list of tensors that will be received from the XLA computation.
_XlaRecvAtHost' op'options device_ordinal dynamic_key
  | eqLengthGuard [] = build $ do
    op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs dynamic_key]
    buildOp [] (opDef "_XlaRecvAtHost"
                & opAttr "Toutputs" .~ fromTensorTypes (Proxy :: Proxy toutputs)
                & opAttr "device_ordinal" .~ device_ordinal
                & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "dynamic_key" description: "The key sent at runtime by the compile node to identify which\nexecution the transfer corresponds to." type: DT_STRING }
output_arg { name: "outputs" description: "A list of tensors that will be received from the XLA computation." type_list_attr: "Toutputs" }
attr { name: "Toutputs" type: "list(type)" description: "The element types of each element in `outputs`." has_minimum: true }
attr { name: "key" type: "string" description: "A key that is unique in the computation and associates the send with the consumer in\nthe XLA computation." }
attr { name: "device_ordinal" type: "int" description: "The device to use." }
-}
-- | XLA Run Op. For use by the XLA JIT only.
--
-- Executes a TensorFlow function previously compiled into a LocalExecutable
-- by an _XlaCompile op.
_XlaRun :: forall v'1 v'2 targs tresults m' . (MonadBuild m',
                                               TensorTypes targs,
                                               TensorTypes tresults) =>
           TensorList (v'1) targs -- ^ __args__
           -> Tensor v'2 Data.ByteString.ByteString -- ^ __key__
           -> m' (TensorList (Value) tresults) -- ^ __results__
_XlaRun = _XlaRun' id
_XlaRun' :: forall v'1 v'2 targs tresults m' . (MonadBuild m',
                                                TensorTypes targs,
                                                TensorTypes tresults) =>
            OpParams ->
            TensorList (v'1) targs -- ^ __args__
            -> Tensor v'2 Data.ByteString.ByteString -- ^ __key__
            -> m' (TensorList (Value) tresults) -- ^ __results__
_XlaRun' op'options args key | eqLengthGuard [] = build $ do
    op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs args,
                                                         buildInputs key]
    buildOp [] (opDef "_XlaRun"
                & opAttr "Targs" .~ fromTensorTypes (Proxy :: Proxy targs)
                & opAttr "Tresults" .~ fromTensorTypes (Proxy :: Proxy tresults)
                & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "args" type_list_attr: "Targs" }
input_arg { name: "key" type: DT_STRING }
output_arg { name: "results" type_list_attr: "Tresults" }
attr { name: "Targs" type: "list(type)" has_minimum: true }
attr { name: "Tresults" type: "list(type)" has_minimum: true }
-}
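-- The heterogeneous argument and result lists are built with the ListOf
-- constructors, assumed here to be exported from TensorFlow.Types as Nil
-- and (:/). A minimal sketch for a single-Float-in, single-Float-out
-- executable whose key tensor would come from a matching _XlaCompile op;
-- runCompiled is an illustrative name only.
runCompiled :: MonadBuild m'
            => Tensor v'1 Float
            -> Tensor v'2 Data.ByteString.ByteString -- compilation key
            -> m' (TensorList Value '[Float])
runCompiled arg key = _XlaRun (arg :/ Nil) key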
-- | A placeholder op for multiple values that will be sent from TensorFlow
-- to a running XLA computation.
_XlaSendFromHost :: forall v'1 v'2 tinputs m' . (MonadBuild m',
                                                 TensorTypes tinputs) =>
                    Data.Int.Int64 -- ^ __device_ordinal__: The device to use.
                    -> TensorList (v'1) tinputs -- ^ __inputs__: A list of tensors that will be sent to the XLA computation.
                    -> Tensor v'2 Data.ByteString.ByteString -- ^ __dynamic_key__: The key sent at runtime by the compile node to identify which execution the transfer corresponds to.
                    -> m' (ControlNode)
_XlaSendFromHost = _XlaSendFromHost' id
_XlaSendFromHost' :: forall v'1 v'2 tinputs m' . (MonadBuild m',
                                                  TensorTypes tinputs) =>
                     OpParams ->
                     Data.Int.Int64 -- ^ __device_ordinal__: The device to use.
                     -> TensorList (v'1) tinputs -- ^ __inputs__: A list of tensors that will be sent to the XLA computation.
                     -> Tensor v'2 Data.ByteString.ByteString -- ^ __dynamic_key__: The key sent at runtime by the compile node to identify which execution the transfer corresponds to.
                     -> m' (ControlNode)
_XlaSendFromHost' op'options device_ordinal inputs dynamic_key
  | eqLengthGuard [] = build $ do
    op'inputs <- fmap Prelude.concat $ Prelude.sequence [buildInputs inputs,
                                                         buildInputs dynamic_key]
    buildOp [] (opDef "_XlaSendFromHost"
                & opAttr "Tinputs" .~ fromTensorTypes (Proxy :: Proxy tinputs)
                & opAttr "device_ordinal" .~ device_ordinal
                & op'options & opInputs .~ op'inputs)
{-
input_arg { name: "inputs" description: "A list of tensors that will be sent to the XLA computation." type_list_attr: "Tinputs" }
input_arg { name: "dynamic_key" description: "The key sent at runtime by the compile node to identify which\nexecution the transfer corresponds to." type: DT_STRING }
attr { name: "Tinputs" type: "list(type)" description: "The element types of each element in `inputs`." has_minimum: true }
attr { name: "key" type: "string" description: "A key that is unique in the computation and associates the send with the consumer in\nthe XLA computation." }
attr { name: "device_ordinal" type: "int" description: "The device to use." }
-}
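-- Taken together, _XlaRecvAtHost and _XlaSendFromHost form the host side of
-- a transfer with a running XLA computation. A hedged end-to-end sketch:
-- receive one Float tensor, then send the same list back. The "key"
-- attribute values are placeholders that a real graph rewrite pass would
-- assign, and hostRoundTrip is an illustrative name, not part of the
-- generated API.
hostRoundTrip :: forall v'1 m' . MonadBuild m'
              => Tensor v'1 Data.ByteString.ByteString -- dynamic key from the compile node
              -> m' (ControlNode)
hostRoundTrip dynamic_key = do
  -- Receive a single Float tensor on device ordinal 0.
  xs <- _XlaRecvAtHost' (opAttr "key" .~ ("host_compute_recv_0" :: ByteString))
          0 dynamic_key :: m' (TensorList Value '[Float])
  -- Send the received list back under a (placeholder) send key.
  _XlaSendFromHost' (opAttr "key" .~ ("host_compute_send_0" :: ByteString))
    0 xs dynamic_key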