{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE DuplicateRecordFields #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE StrictData #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
{-# OPTIONS_GHC -fno-warn-unused-matches #-}

-- Derived from AWS service descriptions, licensed under Apache 2.0.

-- |
-- Module      : Amazonka.SageMaker.Types.OutputConfig
-- Copyright   : (c) 2013-2023 Brendan Hay
-- License     : Mozilla Public License, v. 2.0.
-- Maintainer  : Brendan Hay
-- Stability   : auto-generated
-- Portability : non-portable (GHC extensions)
module Amazonka.SageMaker.Types.OutputConfig where

import qualified Amazonka.Core as Core
import qualified Amazonka.Core.Lens.Internal as Lens
import qualified Amazonka.Data as Data
import qualified Amazonka.Prelude as Prelude
import Amazonka.SageMaker.Types.TargetDevice
import Amazonka.SageMaker.Types.TargetPlatform

-- | Contains information about the output location for the compiled model
-- and the target device that the model runs on. @TargetDevice@ and
-- @TargetPlatform@ are mutually exclusive, so you need to choose one
-- between the two to specify your target device or platform. If you cannot
-- find your device you want to use from the @TargetDevice@ list, use
-- @TargetPlatform@ to describe the platform of your edge device and
-- @CompilerOptions@ if there are specific settings that are required or
-- recommended to use for particular TargetPlatform.
--
-- /See:/ 'newOutputConfig' smart constructor.
data OutputConfig = OutputConfig'
  { -- | Specifies additional parameters for compiler options in JSON format.
    -- The compiler options are @TargetPlatform@ specific. It is required for
    -- NVIDIA accelerators and highly recommended for CPU compilations. For
    -- any other cases, it is optional to specify @CompilerOptions@.
    --
    -- Recognized keys depend on the target:
    --
    -- - @DTYPE@: input data type for PyTorch @ml_*@ (except @ml_inf@)
    --   targets; @\"float32\"@ (default) or @\"int64\"@. For example,
    --   @{\"dtype\" : \"float32\"}@.
    --
    -- - @CPU@: @mcpu@ (micro-architecture, e.g.
    --   @{\'mcpu\': \'skylake-avx512\'}@) and @mattr@ (CPU flags, e.g.
    --   @{\'mattr\': [\'+neon\', \'+vfpv4\']}@).
    --
    -- - @ARM@: add @{\'mattr\': [\'+neon\']}@ when compiling for an ARM
    --   32-bit platform with NEON (Advanced SIMD) support.
    --
    -- - @NVIDIA@: @gpu_code@ (targeted architecture), @trt-ver@ (TensorRT
    --   version, x.y.z) and @cuda-ver@ (CUDA version, x.y). For example,
    --   @{\'gpu-code\': \'sm_72\', \'trt-ver\': \'6.0.1\', \'cuda-ver\': \'10.1\'}@.
    --
    -- - @ANDROID@: @ANDROID_PLATFORM@ (API level 21-29, e.g.
    --   @{\'ANDROID_PLATFORM\': 28}@) and @mattr@ as for ARM above.
    --
    -- - @INFERENTIA@: compiler options passed through as a JSON string,
    --   e.g. @\"CompilerOptions\": \"\\\"--verbose 1 --num-neuroncores 2 -O2\\\"\"@.
    --   See
    --   <https://github.com/aws/aws-neuron-sdk/blob/master/docs/neuron-cc/command-line-reference.md Neuron Compiler CLI>.
    --
    -- - @CoreML@: @class_labels@ — classification labels file name inside
    --   the input tar.gz file, newline-separated labels. For example,
    --   @{\"class_labels\": \"imagenet_labels_1000.txt\"}@.
    --
    -- - @EIA@: @precision_mode@ (@\"FP16\"@ or @\"FP32\"@, default
    --   @\"FP32\"@), @signature_def_key@ (SavedModel signature; defaults to
    --   TensorFlow\'s default signature def key) and @output_names@
    --   (FrozenGraph output tensor names). Set at most one of
    --   @signature_def_key@ or @output_names@. For example:
    --   @{\"precision_mode\": \"FP32\", \"output_names\": [\"output:0\"]}@.
    compilerOptions :: Prelude.Maybe Prelude.Text,
    -- | The Amazon Web Services Key Management Service key (Amazon Web
    -- Services KMS) that Amazon SageMaker uses to encrypt your output models
    -- with Amazon S3 server-side encryption after compilation job. If you
    -- don\'t provide a KMS key ID, Amazon SageMaker uses the default KMS key
    -- for Amazon S3 for your role\'s account. For more information, see
    -- <https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html KMS-Managed Encryption Keys>
    -- in the /Amazon Simple Storage Service Developer Guide./
    --
    -- The KmsKeyId can be any of the following formats:
    --
    -- - Key ID: @1234abcd-12ab-34cd-56ef-1234567890ab@
    --
    -- - Key ARN:
    --   @arn:aws:kms:us-west-2:111122223333:key\/1234abcd-12ab-34cd-56ef-1234567890ab@
    --
    -- - Alias name: @alias\/ExampleAlias@
    --
    -- - Alias name ARN:
    --   @arn:aws:kms:us-west-2:111122223333:alias\/ExampleAlias@
    kmsKeyId :: Prelude.Maybe Prelude.Text,
    -- | Identifies the target device or the machine learning instance that
    -- you want to run your model on after the compilation has completed.
    -- Alternatively, you can specify OS, architecture, and accelerator using
    -- TargetPlatform fields. It can be used instead of @TargetPlatform@.
    targetDevice :: Prelude.Maybe TargetDevice,
    -- | Contains information about a target platform that you want your model
    -- to run on, such as OS, architecture, and accelerators. It is an
    -- alternative of @TargetDevice@.
    --
    -- Example configurations of @TargetPlatform@ and @CompilerOptions@:
    --
    -- - Raspberry Pi 3 Model B+:
    --   @\"TargetPlatform\": {\"Os\": \"LINUX\", \"Arch\": \"ARM_EABIHF\"},@
    --   @\"CompilerOptions\": {\'mattr\': [\'+neon\']}@
    --
    -- - Jetson TX2:
    --   @\"TargetPlatform\": {\"Os\": \"LINUX\", \"Arch\": \"ARM64\", \"Accelerator\": \"NVIDIA\"},@
    --   @\"CompilerOptions\": {\'gpu-code\': \'sm_62\', \'trt-ver\': \'6.0.1\', \'cuda-ver\': \'10.0\'}@
    --
    -- - EC2 m5.2xlarge instance OS:
    --   @\"TargetPlatform\": {\"Os\": \"LINUX\", \"Arch\": \"X86_64\", \"Accelerator\": \"NVIDIA\"},@
    --   @\"CompilerOptions\": {\'mcpu\': \'skylake-avx512\'}@
    --
    -- - RK3399:
    --   @\"TargetPlatform\": {\"Os\": \"LINUX\", \"Arch\": \"ARM64\", \"Accelerator\": \"MALI\"}@
    --
    -- - ARMv7 phone (CPU):
    --   @\"TargetPlatform\": {\"Os\": \"ANDROID\", \"Arch\": \"ARM_EABI\"},@
    --   @\"CompilerOptions\": {\'ANDROID_PLATFORM\': 25, \'mattr\': [\'+neon\']}@
    --
    -- - ARMv8 phone (CPU):
    --   @\"TargetPlatform\": {\"Os\": \"ANDROID\", \"Arch\": \"ARM64\"},@
    --   @\"CompilerOptions\": {\'ANDROID_PLATFORM\': 29}@
    targetPlatform :: Prelude.Maybe TargetPlatform,
    -- | Identifies the S3 bucket where you want Amazon SageMaker to store the
    -- model artifacts. For example, @s3:\/\/bucket-name\/key-name-prefix@.
    s3OutputLocation :: Prelude.Text
  }
  deriving (Prelude.Eq, Prelude.Read, Prelude.Show, Prelude.Generic)

-- |
-- Create a value of 'OutputConfig' with all optional fields omitted.
--
-- Use <https://hackage.haskell.org/package/generic-lens generic-lens> or <https://hackage.haskell.org/package/optics optics> to modify other optional fields.
--
-- The following record fields are available, with the corresponding lenses provided
-- for backwards compatibility:
--
-- 'compilerOptions', 'outputConfig_compilerOptions' - Additional
-- @TargetPlatform@-specific compiler options in JSON format; see the
-- field documentation for the per-target keys.
--
-- 'kmsKeyId', 'outputConfig_kmsKeyId' - The KMS key used to encrypt
-- output models with S3 server-side encryption; accepts a key ID, key
-- ARN, alias name, or alias name ARN.
--
-- 'targetDevice', 'outputConfig_targetDevice' - The target device or ML
-- instance to run the compiled model on; mutually exclusive with
-- @TargetPlatform@.
--
-- 'targetPlatform', 'outputConfig_targetPlatform' - OS, architecture,
-- and accelerator of the target platform; an alternative to
-- @TargetDevice@.
--
-- 's3OutputLocation', 'outputConfig_s3OutputLocation' - The S3 location
-- where model artifacts are stored, e.g.
-- @s3:\/\/bucket-name\/key-name-prefix@.
newOutputConfig ::
  -- | 's3OutputLocation'
  Prelude.Text ->
  OutputConfig
newOutputConfig pS3OutputLocation_ =
  OutputConfig'
    { compilerOptions = Prelude.Nothing,
      kmsKeyId = Prelude.Nothing,
      targetDevice = Prelude.Nothing,
      targetPlatform = Prelude.Nothing,
      s3OutputLocation = pS3OutputLocation_
    }

-- | Additional @TargetPlatform@-specific compiler options in JSON format.
-- Required for NVIDIA accelerators and highly recommended for CPU
-- compilations; see 'compilerOptions' for the recognized per-target keys.
outputConfig_compilerOptions :: Lens.Lens' OutputConfig (Prelude.Maybe Prelude.Text)
outputConfig_compilerOptions = Lens.lens (\OutputConfig' {compilerOptions} -> compilerOptions) (\s@OutputConfig' {} a -> s {compilerOptions = a} :: OutputConfig)

-- | The KMS key that Amazon SageMaker uses to encrypt your output models
-- with Amazon S3 server-side encryption after the compilation job. May be
-- a key ID, key ARN, alias name, or alias name ARN; defaults to the
-- account\'s default KMS key for Amazon S3. See 'kmsKeyId' for details.
outputConfig_kmsKeyId :: Lens.Lens' OutputConfig (Prelude.Maybe Prelude.Text)
outputConfig_kmsKeyId = Lens.lens (\OutputConfig' {kmsKeyId} -> kmsKeyId) (\s@OutputConfig' {} a -> s {kmsKeyId = a} :: OutputConfig)

-- | Identifies the target device or the machine learning instance that you
-- want to run your model on after the compilation has completed.
-- Alternatively, you can specify OS, architecture, and accelerator using
-- TargetPlatform fields. It can be used instead of @TargetPlatform@.
outputConfig_targetDevice :: Lens.Lens' OutputConfig (Prelude.Maybe TargetDevice)
outputConfig_targetDevice = Lens.lens (\OutputConfig' {targetDevice} -> targetDevice) (\s@OutputConfig' {} a -> s {targetDevice = a} :: OutputConfig)

-- | Contains information about a target platform that you want your model
-- to run on, such as OS, architecture, and accelerators. It is an
-- alternative of @TargetDevice@; see 'targetPlatform' for example
-- configurations.
outputConfig_targetPlatform :: Lens.Lens' OutputConfig (Prelude.Maybe TargetPlatform)
outputConfig_targetPlatform = Lens.lens (\OutputConfig' {targetPlatform} -> targetPlatform) (\s@OutputConfig' {} a -> s {targetPlatform = a} :: OutputConfig)

-- | Identifies the S3 bucket where you want Amazon SageMaker to store the
-- model artifacts. For example, @s3:\/\/bucket-name\/key-name-prefix@.
outputConfig_s3OutputLocation :: Lens.Lens' OutputConfig Prelude.Text
outputConfig_s3OutputLocation = Lens.lens (\OutputConfig' {s3OutputLocation} -> s3OutputLocation) (\s@OutputConfig' {} a -> s {s3OutputLocation = a} :: OutputConfig)

instance Data.FromJSON OutputConfig where
  parseJSON =
    Data.withObject
      "OutputConfig"
      ( \x ->
          OutputConfig'
            Prelude.<$> (x Data..:? "CompilerOptions")
            Prelude.<*> (x Data..:? "KmsKeyId")
            Prelude.<*> (x Data..:? "TargetDevice")
            Prelude.<*> (x Data..:? "TargetPlatform")
            Prelude.<*> (x Data..: "S3OutputLocation")
      )

instance Prelude.Hashable OutputConfig where
  hashWithSalt _salt OutputConfig' {..} =
    _salt
      `Prelude.hashWithSalt` compilerOptions
      `Prelude.hashWithSalt` kmsKeyId
      `Prelude.hashWithSalt` targetDevice
      `Prelude.hashWithSalt` targetPlatform
      `Prelude.hashWithSalt` s3OutputLocation

instance Prelude.NFData OutputConfig where
  rnf OutputConfig' {..} =
    Prelude.rnf compilerOptions
      `Prelude.seq` Prelude.rnf kmsKeyId
      `Prelude.seq` Prelude.rnf targetDevice
      `Prelude.seq` Prelude.rnf targetPlatform
      `Prelude.seq` Prelude.rnf s3OutputLocation

instance Data.ToJSON OutputConfig where
  toJSON OutputConfig' {..} =
    Data.object
      ( Prelude.catMaybes
          [ ("CompilerOptions" Data..=) Prelude.<$> compilerOptions,
            ("KmsKeyId" Data..=) Prelude.<$> kmsKeyId,
            ("TargetDevice" Data..=) Prelude.<$> targetDevice,
            ("TargetPlatform" Data..=) Prelude.<$> targetPlatform,
            Prelude.Just ("S3OutputLocation" Data..= s3OutputLocation)
          ]
      )