{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE DuplicateRecordFields #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE StrictData #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
{-# OPTIONS_GHC -fno-warn-unused-matches #-}

-- Derived from AWS service descriptions, licensed under Apache 2.0.

-- |
-- Module      : Amazonka.DMS.Types.S3Settings
-- Copyright   : (c) 2013-2023 Brendan Hay
-- License     : Mozilla Public License, v. 2.0.
-- Maintainer  : Brendan Hay
-- Stability   : auto-generated
-- Portability : non-portable (GHC extensions)
module Amazonka.DMS.Types.S3Settings where

import qualified Amazonka.Core as Core
import qualified Amazonka.Core.Lens.Internal as Lens
import Amazonka.DMS.Types.CannedAclForObjectsValue
import Amazonka.DMS.Types.CompressionTypeValue
import Amazonka.DMS.Types.DataFormatValue
import Amazonka.DMS.Types.DatePartitionDelimiterValue
import Amazonka.DMS.Types.DatePartitionSequenceValue
import Amazonka.DMS.Types.EncodingTypeValue
import Amazonka.DMS.Types.EncryptionModeValue
import Amazonka.DMS.Types.ParquetVersionValue
import qualified Amazonka.Data as Data
import qualified Amazonka.Prelude as Prelude

-- | Settings for exporting data to Amazon S3.
--
-- /See:/ 'newS3Settings' smart constructor.
data S3Settings = S3Settings'
  { -- | An optional parameter that, when set to @true@ or @y@, you can use to
    -- add column name information to the .csv output file.
    --
    -- The default value is @false@. Valid values are @true@, @false@, @y@, and
    -- @n@.
    addColumnName :: Prelude.Maybe Prelude.Bool,
    -- | Use the S3 target endpoint setting @AddTrailingPaddingCharacter@ to add
    -- padding on string data. The default value is @false@.
    addTrailingPaddingCharacter :: Prelude.Maybe Prelude.Bool,
    -- | An optional parameter to set a folder name in the S3 bucket. If
    -- provided, tables are created in the path
    -- @ @/@bucketFolder@/@\/@/@schema_name@/@\/@/@table_name@/@\/@. If this
    -- parameter isn\'t specified, then the path used is
    -- @ @/@schema_name@/@\/@/@table_name@/@\/@.
    bucketFolder :: Prelude.Maybe Prelude.Text,
    -- | The name of the S3 bucket.
    bucketName :: Prelude.Maybe Prelude.Text,
    -- | A value that enables DMS to specify a predefined (canned) access control
    -- list for objects created in an Amazon S3 bucket as .csv or .parquet
    -- files. For more information about Amazon S3 canned ACLs, see
    -- <http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl Canned ACL>
    -- in the /Amazon S3 Developer Guide./
    --
    -- The default value is NONE. Valid values include NONE, PRIVATE,
    -- PUBLIC_READ, PUBLIC_READ_WRITE, AUTHENTICATED_READ, AWS_EXEC_READ,
    -- BUCKET_OWNER_READ, and BUCKET_OWNER_FULL_CONTROL.
    cannedAclForObjects :: Prelude.Maybe CannedAclForObjectsValue,
    -- | A value that enables a change data capture (CDC) load to write INSERT
    -- and UPDATE operations to .csv or .parquet (columnar storage) output
    -- files. The default setting is @false@, but when @CdcInsertsAndUpdates@
    -- is set to @true@ or @y@, only INSERTs and UPDATEs from the source
    -- database are migrated to the .csv or .parquet file.
    --
    -- For .csv file format only, how these INSERTs and UPDATEs are recorded
    -- depends on the value of the @IncludeOpForFullLoad@ parameter. If
    -- @IncludeOpForFullLoad@ is set to @true@, the first field of every CDC
    -- record is set to either @I@ or @U@ to indicate INSERT and UPDATE
    -- operations at the source. But if @IncludeOpForFullLoad@ is set to
    -- @false@, CDC records are written without an indication of INSERT or
    -- UPDATE operations at the source. For more information about how these
    -- settings work together, see
    -- <https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps Indicating Source DB Operations in Migrated S3 Data>
    -- in the /Database Migration Service User Guide/.
    --
    -- DMS supports the use of the @CdcInsertsAndUpdates@ parameter in versions
    -- 3.3.1 and later.
    --
    -- @CdcInsertsOnly@ and @CdcInsertsAndUpdates@ can\'t both be set to @true@
    -- for the same endpoint. Set either @CdcInsertsOnly@ or
    -- @CdcInsertsAndUpdates@ to @true@ for the same endpoint, but not both.
    cdcInsertsAndUpdates :: Prelude.Maybe Prelude.Bool,
    -- | A value that enables a change data capture (CDC) load to write only
    -- INSERT operations to .csv or columnar storage (.parquet) output files.
    -- By default (the @false@ setting), the first field in a .csv or .parquet
    -- record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These
    -- values indicate whether the row was inserted, updated, or deleted at the
    -- source database for a CDC load to the target.
    --
    -- If @CdcInsertsOnly@ is set to @true@ or @y@, only INSERTs from the
    -- source database are migrated to the .csv or .parquet file. For .csv
    -- format only, how these INSERTs are recorded depends on the value of
    -- @IncludeOpForFullLoad@. If @IncludeOpForFullLoad@ is set to @true@, the
    -- first field of every CDC record is set to I to indicate the INSERT
    -- operation at the source. If @IncludeOpForFullLoad@ is set to @false@,
    -- every CDC record is written without a first field to indicate the INSERT
    -- operation at the source. For more information about how these settings
    -- work together, see
    -- <https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps Indicating Source DB Operations in Migrated S3 Data>
    -- in the /Database Migration Service User Guide/.
    --
    -- DMS supports the interaction described preceding between the
    -- @CdcInsertsOnly@ and @IncludeOpForFullLoad@ parameters in versions 3.1.4
    -- and later.
    --
    -- @CdcInsertsOnly@ and @CdcInsertsAndUpdates@ can\'t both be set to @true@
    -- for the same endpoint. Set either @CdcInsertsOnly@ or
    -- @CdcInsertsAndUpdates@ to @true@ for the same endpoint, but not both.
    cdcInsertsOnly :: Prelude.Maybe Prelude.Bool,
    -- | Maximum length of the interval, defined in seconds, after which to
    -- output a file to Amazon S3.
    --
    -- When @CdcMaxBatchInterval@ and @CdcMinFileSize@ are both specified, the
    -- file write is triggered by whichever parameter condition is met first
    -- within a DMS CloudFormation template.
    --
    -- The default value is 60 seconds.
    cdcMaxBatchInterval :: Prelude.Maybe Prelude.Int,
    -- | Minimum file size, defined in kilobytes, to reach for a file output to
    -- Amazon S3.
    --
    -- When @CdcMinFileSize@ and @CdcMaxBatchInterval@ are both specified, the
    -- file write is triggered by whichever parameter condition is met first
    -- within a DMS CloudFormation template.
    --
    -- The default value is 32 MB.
    cdcMinFileSize :: Prelude.Maybe Prelude.Int,
    -- | Specifies the folder path of CDC files. For an S3 source, this setting
    -- is required if a task captures change data; otherwise, it\'s optional.
    -- If @CdcPath@ is set, DMS reads CDC files from this path and replicates
    -- the data changes to the target endpoint. For an S3 target if you set
    -- <https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-PreserveTransactions PreserveTransactions>
    -- to @true@, DMS verifies that you have set this parameter to a folder
    -- path on your S3 target where DMS can save the transaction order for the
    -- CDC load. DMS creates this CDC folder path in either your S3 target
    -- working directory or the S3 target location specified by
    -- <https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketFolder BucketFolder>
    -- and
    -- <https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketName BucketName>
    -- .
    --
    -- For example, if you specify @CdcPath@ as @MyChangedData@, and you
    -- specify @BucketName@ as @MyTargetBucket@ but do not specify
    -- @BucketFolder@, DMS creates the following CDC folder path:
    -- @MyTargetBucket\/MyChangedData@.
    --
    -- If you specify the same @CdcPath@, and you specify @BucketName@ as
    -- @MyTargetBucket@ and @BucketFolder@ as @MyTargetData@, DMS creates the
    -- following CDC folder path:
    -- @MyTargetBucket\/MyTargetData\/MyChangedData@.
    --
    -- For more information on CDC including transaction order on an S3 target,
    -- see
    -- <https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.EndpointSettings.CdcPath Capturing data changes (CDC) including transaction order on the S3 target>.
    --
    -- This setting is supported in DMS versions 3.4.2 and later.
    cdcPath :: Prelude.Maybe Prelude.Text,
    -- | An optional parameter to use GZIP to compress the target files. Set to
    -- GZIP to compress the target files. Either set this parameter to NONE
    -- (the default) or don\'t use it to leave the files uncompressed. This
    -- parameter applies to both .csv and .parquet file formats.
    compressionType :: Prelude.Maybe CompressionTypeValue,
    -- | The delimiter used to separate columns in the .csv file for both source
    -- and target. The default is a comma.
    csvDelimiter :: Prelude.Maybe Prelude.Text,
    -- | This setting only applies if your Amazon S3 output files during a change
    -- data capture (CDC) load are written in .csv format. If
    -- <https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-UseCsvNoSupValue UseCsvNoSupValue>
    -- is set to true, specify a string value that you want DMS to use for all
    -- columns not included in the supplemental log. If you do not specify a
    -- string value, DMS uses the null value for these columns regardless of
    -- the @UseCsvNoSupValue@ setting.
    --
    -- This setting is supported in DMS versions 3.4.1 and later.
    csvNoSupValue :: Prelude.Maybe Prelude.Text,
    -- | An optional parameter that specifies how DMS treats null values. While
    -- handling the null value, you can use this parameter to pass a
    -- user-defined string as null when writing to the target. For example,
    -- when target columns are not nullable, you can use this option to
    -- differentiate between the empty string value and the null value. So, if
    -- you set this parameter value to the empty string (\"\" or \'\'), DMS
    -- treats the empty string as the null value instead of @NULL@.
    --
    -- The default value is @NULL@. Valid values include any valid string.
    csvNullValue :: Prelude.Maybe Prelude.Text,
    -- | The delimiter used to separate rows in the .csv file for both source and
    -- target. The default is a newline (@\\n@).
    csvRowDelimiter :: Prelude.Maybe Prelude.Text,
    -- | The format of the data that you want to use for output. You can choose
    -- one of the following:
    --
    -- -   @csv@ : This is a row-based file format with comma-separated values
    --     (.csv).
    --
    -- -   @parquet@ : Apache Parquet (.parquet) is a columnar storage file
    --     format that features efficient compression and provides faster query
    --     response.
    dataFormat :: Prelude.Maybe DataFormatValue,
    -- | The size of one data page in bytes. This parameter defaults to 1024 *
    -- 1024 bytes (1 MiB). This number is used for .parquet file format only.
    dataPageSize :: Prelude.Maybe Prelude.Int,
    -- | Specifies a date separating delimiter to use during folder partitioning.
    -- The default value is @SLASH@. Use this parameter when
    -- @DatePartitionedEnabled@ is set to @true@.
    datePartitionDelimiter :: Prelude.Maybe DatePartitionDelimiterValue,
    -- | When set to @true@, this parameter partitions S3 bucket folders based on
    -- transaction commit dates. The default value is @false@. For more
    -- information about date-based folder partitioning, see
    -- <https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.DatePartitioning Using date-based folder partitioning>.
    datePartitionEnabled :: Prelude.Maybe Prelude.Bool,
    -- | Identifies the sequence of the date format to use during folder
    -- partitioning. The default value is @YYYYMMDD@. Use this parameter when
    -- @DatePartitionedEnabled@ is set to @true@.
    datePartitionSequence :: Prelude.Maybe DatePartitionSequenceValue,
    -- | When creating an S3 target endpoint, set @DatePartitionTimezone@ to
    -- convert the current UTC time into a specified time zone. The conversion
    -- occurs when a date partition folder is created and a CDC filename is
    -- generated. The time zone format is Area\/Location. Use this parameter
    -- when @DatePartitionedEnabled@ is set to @true@, as shown in the
    -- following example.
    --
    -- @s3-settings=\'{\"DatePartitionEnabled\": true, \"DatePartitionSequence\": \"YYYYMMDDHH\", \"DatePartitionDelimiter\": \"SLASH\", \"DatePartitionTimezone\":\"@/@Asia\/Seoul@/@\", \"BucketName\": \"dms-nattarat-test\"}\'@
    datePartitionTimezone :: Prelude.Maybe Prelude.Text,
    -- | The maximum size of an encoded dictionary page of a column. If the
    -- dictionary page exceeds this, this column is stored using an encoding
    -- type of @PLAIN@. This parameter defaults to 1024 * 1024 bytes (1 MiB),
    -- the maximum size of a dictionary page before it reverts to @PLAIN@
    -- encoding. This size is used for .parquet file format only.
    dictPageSizeLimit :: Prelude.Maybe Prelude.Int,
    -- | A value that enables statistics for Parquet pages and row groups. Choose
    -- @true@ to enable statistics, @false@ to disable. Statistics include
    -- @NULL@, @DISTINCT@, @MAX@, and @MIN@ values. This parameter defaults to
    -- @true@. This value is used for .parquet file format only.
    enableStatistics :: Prelude.Maybe Prelude.Bool,
    -- | The type of encoding you are using:
    --
    -- -   @RLE_DICTIONARY@ uses a combination of bit-packing and run-length
    --     encoding to store repeated values more efficiently. This is the
    --     default.
    --
    -- -   @PLAIN@ doesn\'t use encoding at all. Values are stored as they are.
    --
    -- -   @PLAIN_DICTIONARY@ builds a dictionary of the values encountered in
    --     a given column. The dictionary is stored in a dictionary page for
    --     each column chunk.
    encodingType :: Prelude.Maybe EncodingTypeValue,
    -- | The type of server-side encryption that you want to use for your data.
    -- This encryption type is part of the endpoint settings or the extra
    -- connections attributes for Amazon S3. You can choose either @SSE_S3@
    -- (the default) or @SSE_KMS@.
    --
    -- For the @ModifyEndpoint@ operation, you can change the existing value of
    -- the @EncryptionMode@ parameter from @SSE_KMS@ to @SSE_S3@. But you can’t
    -- change the existing value from @SSE_S3@ to @SSE_KMS@.
    --
    -- To use @SSE_S3@, you need an Identity and Access Management (IAM) role
    -- with permission to allow @\"arn:aws:s3:::dms-*\"@ to use the following
    -- actions:
    --
    -- -   @s3:CreateBucket@
    --
    -- -   @s3:ListBucket@
    --
    -- -   @s3:DeleteBucket@
    --
    -- -   @s3:GetBucketLocation@
    --
    -- -   @s3:GetObject@
    --
    -- -   @s3:PutObject@
    --
    -- -   @s3:DeleteObject@
    --
    -- -   @s3:GetObjectVersion@
    --
    -- -   @s3:GetBucketPolicy@
    --
    -- -   @s3:PutBucketPolicy@
    --
    -- -   @s3:DeleteBucketPolicy@
    encryptionMode :: Prelude.Maybe EncryptionModeValue,
    -- | To specify a bucket owner and prevent sniping, you can use the
    -- @ExpectedBucketOwner@ endpoint setting.
    --
    -- Example:
    -- @--s3-settings=\'{\"ExpectedBucketOwner\": \"@/@AWS_Account_ID@/@\"}\'@
    --
    -- When you make a request to test a connection or perform a migration, S3
    -- checks the account ID of the bucket owner against the specified
    -- parameter.
    expectedBucketOwner :: Prelude.Maybe Prelude.Text,
    -- | Specifies how tables are defined in the S3 source files only.
    externalTableDefinition :: Prelude.Maybe Prelude.Text,
    -- | When this value is set to 1, DMS ignores the first row header in a .csv
    -- file. A value of 1 turns on the feature; a value of 0 turns off the
    -- feature.
    --
    -- The default is 0.
    ignoreHeaderRows :: Prelude.Maybe Prelude.Int,
    -- | A value that enables a full load to write INSERT operations to the
    -- comma-separated value (.csv) output files only to indicate how the rows
    -- were added to the source database.
    --
    -- DMS supports the @IncludeOpForFullLoad@ parameter in versions 3.1.4 and
    -- later.
    --
    -- For full load, records can only be inserted. By default (the @false@
    -- setting), no information is recorded in these output files for a full
    -- load to indicate that the rows were inserted at the source database. If
    -- @IncludeOpForFullLoad@ is set to @true@ or @y@, the INSERT is recorded
    -- as an I annotation in the first field of the .csv file. This allows the
    -- format of your target records from a full load to be consistent with the
    -- target records from a CDC load.
    --
    -- This setting works together with the @CdcInsertsOnly@ and the
    -- @CdcInsertsAndUpdates@ parameters for output to .csv files only. For
    -- more information about how these settings work together, see
    -- <https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps Indicating Source DB Operations in Migrated S3 Data>
    -- in the /Database Migration Service User Guide/.
    includeOpForFullLoad :: Prelude.Maybe Prelude.Bool,
    -- | A value that specifies the maximum size (in KB) of any .csv file to be
    -- created while migrating to an S3 target during full load.
    --
    -- The default value is 1,048,576 KB (1 GB). Valid values include 1 to
    -- 1,048,576.
    maxFileSize :: Prelude.Maybe Prelude.Int,
    -- | A value that specifies the precision of any @TIMESTAMP@ column values
    -- that are written to an Amazon S3 object file in .parquet format.
    --
    -- DMS supports the @ParquetTimestampInMillisecond@ parameter in versions
    -- 3.1.4 and later.
    --
    -- When @ParquetTimestampInMillisecond@ is set to @true@ or @y@, DMS writes
    -- all @TIMESTAMP@ columns in a .parquet formatted file with millisecond
    -- precision. Otherwise, DMS writes them with microsecond precision.
    --
    -- Currently, Amazon Athena and Glue can handle only millisecond precision
    -- for @TIMESTAMP@ values. Set this parameter to @true@ for S3 endpoint
    -- object files that are .parquet formatted only if you plan to query or
    -- process the data with Athena or Glue.
    --
    -- DMS writes any @TIMESTAMP@ column values written to an S3 file in .csv
    -- format with microsecond precision.
    --
    -- Setting @ParquetTimestampInMillisecond@ has no effect on the string
    -- format of the timestamp column value that is inserted by setting the
    -- @TimestampColumnName@ parameter.
    parquetTimestampInMillisecond :: Prelude.Maybe Prelude.Bool,
    -- | The version of the Apache Parquet format that you want to use:
    -- @parquet_1_0@ (the default) or @parquet_2_0@.
    parquetVersion :: Prelude.Maybe ParquetVersionValue,
    -- | If set to @true@, DMS saves the transaction order for a change data
    -- capture (CDC) load on the Amazon S3 target specified by
    -- <https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-CdcPath CdcPath>
    -- . For more information, see
    -- <https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.EndpointSettings.CdcPath Capturing data changes (CDC) including transaction order on the S3 target>.
    --
    -- This setting is supported in DMS versions 3.4.2 and later.
    preserveTransactions :: Prelude.Maybe Prelude.Bool,
    -- | For an S3 source, when this value is set to @true@ or @y@, each leading
    -- double quotation mark has to be followed by an ending double quotation
    -- mark. This formatting complies with RFC 4180. When this value is set to
    -- @false@ or @n@, string literals are copied to the target as is. In this
    -- case, a delimiter (row or column) signals the end of the field. Thus,
    -- you can\'t use a delimiter as part of the string, because it signals the
    -- end of the value.
    --
    -- For an S3 target, an optional parameter used to set behavior to comply
    -- with RFC 4180 for data migrated to Amazon S3 using .csv file format
    -- only. When this value is set to @true@ or @y@ using Amazon S3 as a
    -- target, if the data has quotation marks or newline characters in it, DMS
    -- encloses the entire column with an additional pair of double quotation
    -- marks (\"). Every quotation mark within the data is repeated twice.
    --
    -- The default value is @true@. Valid values include @true@, @false@, @y@,
    -- and @n@.
    rfc4180 :: Prelude.Maybe Prelude.Bool,
    -- | The number of rows in a row group. A smaller row group size provides
    -- faster reads. But as the number of row groups grows, the slower the
    -- writes become. This parameter defaults to 10,000 rows. This number is used for
    -- .parquet file format only.
    --
    -- If you choose a value larger than the maximum, @RowGroupLength@ is set
    -- to the max row group length in bytes (64 * 1024 * 1024).
    rowGroupLength :: Prelude.Maybe Prelude.Int,
    -- | If you are using @SSE_KMS@ for the @EncryptionMode@, provide the KMS key
    -- ID. The key that you use needs an attached policy that enables Identity
    -- and Access Management (IAM) user permissions and allows use of the key.
    --
    -- Here is a CLI example:
    -- @aws dms create-endpoint --endpoint-identifier @/@value@/@ --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=@/@value@/@,BucketFolder=@/@value@/@,BucketName=@/@value@/@,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=@/@value@/@ @
    serverSideEncryptionKmsKeyId :: Prelude.Maybe Prelude.Text,
    -- | The Amazon Resource Name (ARN) used by the service to access the IAM
    -- role. The role must allow the @iam:PassRole@ action. It is a required
    -- parameter that enables DMS to write and read objects from an S3 bucket.
    serviceAccessRoleArn :: Prelude.Maybe Prelude.Text,
    -- | A value that when nonblank causes DMS to add a column with timestamp
    -- information to the endpoint data for an Amazon S3 target.
    --
    -- DMS supports the @TimestampColumnName@ parameter in versions 3.1.4 and
    -- later.
    --
    -- DMS includes an additional @STRING@ column in the .csv or .parquet
    -- object files of your migrated data when you set @TimestampColumnName@ to
    -- a nonblank value.
    --
    -- For a full load, each row of this timestamp column contains a timestamp
    -- for when the data was transferred from the source to the target by DMS.
    --
    -- For a change data capture (CDC) load, each row of the timestamp column
    -- contains the timestamp for the commit of that row in the source
    -- database.
    --
    -- The string format for this timestamp column value is
    -- @yyyy-MM-dd HH:mm:ss.SSSSSS@. By default, the precision of this value is
    -- in microseconds. For a CDC load, the rounding of the precision depends
    -- on the commit timestamp supported by DMS for the source database.
    --
    -- When the @AddColumnName@ parameter is set to @true@, DMS also includes a
    -- name for the timestamp column that you set with @TimestampColumnName@.
    timestampColumnName :: Prelude.Maybe Prelude.Text,
    -- | This setting applies if the S3 output files during a change data capture
    -- (CDC) load are written in .csv format. If set to @true@ for columns not
    -- included in the supplemental log, DMS uses the value specified by
    -- <https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-CsvNoSupValue CsvNoSupValue>
    -- . If not set or set to @false@, DMS uses the null value for these
    -- columns.
    --
    -- This setting is supported in DMS versions 3.4.1 and later.
    useCsvNoSupValue :: Prelude.Maybe Prelude.Bool,
    -- | When set to @true@, this parameter uses the task start time as the
    -- timestamp column value instead of the time the data is written to the
    -- target.
    -- For full load, when @useTaskStartTimeForFullLoadTimestamp@ is set to
    -- @true@, each row of the timestamp column contains the task start time.
    -- For CDC loads, each row of the timestamp column contains the transaction
    -- commit time.
    --
    -- When @useTaskStartTimeForFullLoadTimestamp@ is set to @false@, the full
    -- load timestamp in the timestamp column increments with the time data
    -- arrives at the target.
    useTaskStartTimeForFullLoadTimestamp :: Prelude.Maybe Prelude.Bool
  }
  deriving (Prelude.Eq, Prelude.Read, Prelude.Show, Prelude.Generic)

-- |
-- Create a value of 'S3Settings' with all optional fields omitted.
--
-- Use <https://hackage.haskell.org/package/generic-lens generic-lens> or <https://hackage.haskell.org/package/optics optics> to modify other optional fields.
--
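-- As a quick orientation, here is a minimal usage sketch that overrides a few
-- fields with record update syntax. The bucket and folder names are
-- placeholders, and the sketch assumes @OverloadedStrings@ is enabled and that
-- the field names resolve unambiguously in the importing module:
--
-- > exampleS3Settings :: S3Settings
-- > exampleS3Settings =
-- >   newS3Settings
-- >     { bucketName = Prelude.Just "my-example-bucket",
-- >       bucketFolder = Prelude.Just "dms-output",
-- >       -- write column headers into the generated .csv files
-- >       addColumnName = Prelude.Just Prelude.True,
-- >       -- cap full-load .csv files at 102,400 KB (100 MB)
-- >       maxFileSize = Prelude.Just 102400
-- >     }
--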
-- The following record fields are available, with the corresponding lenses provided
-- for backwards compatibility:
--
-- 'addColumnName', 's3Settings_addColumnName' - An optional parameter that, when set to @true@ or @y@, you can use to
-- add column name information to the .csv output file.
--
-- The default value is @false@. Valid values are @true@, @false@, @y@, and
-- @n@.
--
-- 'addTrailingPaddingCharacter', 's3Settings_addTrailingPaddingCharacter' - Use the S3 target endpoint setting @AddTrailingPaddingCharacter@ to add
-- padding on string data. The default value is @false@.
--
-- 'bucketFolder', 's3Settings_bucketFolder' - An optional parameter to set a folder name in the S3 bucket. If
-- provided, tables are created in the path
-- @ @/@bucketFolder@/@\/@/@schema_name@/@\/@/@table_name@/@\/@. If this
-- parameter isn\'t specified, then the path used is
-- @ @/@schema_name@/@\/@/@table_name@/@\/@.
--
-- 'bucketName', 's3Settings_bucketName' - The name of the S3 bucket.
--
-- 'cannedAclForObjects', 's3Settings_cannedAclForObjects' - A value that enables DMS to specify a predefined (canned) access control
-- list for objects created in an Amazon S3 bucket as .csv or .parquet
-- files. For more information about Amazon S3 canned ACLs, see
-- <http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl Canned ACL>
-- in the /Amazon S3 Developer Guide./
--
-- The default value is NONE. Valid values include NONE, PRIVATE,
-- PUBLIC_READ, PUBLIC_READ_WRITE, AUTHENTICATED_READ, AWS_EXEC_READ,
-- BUCKET_OWNER_READ, and BUCKET_OWNER_FULL_CONTROL.
--
-- 'cdcInsertsAndUpdates', 's3Settings_cdcInsertsAndUpdates' - A value that enables a change data capture (CDC) load to write INSERT
-- and UPDATE operations to .csv or .parquet (columnar storage) output
-- files. The default setting is @false@, but when @CdcInsertsAndUpdates@
-- is set to @true@ or @y@, only INSERTs and UPDATEs from the source
-- database are migrated to the .csv or .parquet file.
--
-- For .csv file format only, how these INSERTs and UPDATEs are recorded
-- depends on the value of the @IncludeOpForFullLoad@ parameter. If
-- @IncludeOpForFullLoad@ is set to @true@, the first field of every CDC
-- record is set to either @I@ or @U@ to indicate INSERT and UPDATE
-- operations at the source. But if @IncludeOpForFullLoad@ is set to
-- @false@, CDC records are written without an indication of INSERT or
-- UPDATE operations at the source. For more information about how these
-- settings work together, see
-- <https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps Indicating Source DB Operations in Migrated S3 Data>
-- in the /Database Migration Service User Guide/.
--
-- DMS supports the use of the @CdcInsertsAndUpdates@ parameter in versions
-- 3.3.1 and later.
--
-- @CdcInsertsOnly@ and @CdcInsertsAndUpdates@ can\'t both be set to @true@
-- for the same endpoint. Set either @CdcInsertsOnly@ or
-- @CdcInsertsAndUpdates@ to @true@ for the same endpoint, but not both.
--
-- 'cdcInsertsOnly', 's3Settings_cdcInsertsOnly' - A value that enables a change data capture (CDC) load to write only
-- INSERT operations to .csv or columnar storage (.parquet) output files.
-- By default (the @false@ setting), the first field in a .csv or .parquet
-- record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These
-- values indicate whether the row was inserted, updated, or deleted at the
-- source database for a CDC load to the target.
--
-- If @CdcInsertsOnly@ is set to @true@ or @y@, only INSERTs from the
-- source database are migrated to the .csv or .parquet file. For .csv
-- format only, how these INSERTs are recorded depends on the value of
-- @IncludeOpForFullLoad@. If @IncludeOpForFullLoad@ is set to @true@, the
-- first field of every CDC record is set to I to indicate the INSERT
-- operation at the source. If @IncludeOpForFullLoad@ is set to @false@,
-- every CDC record is written without a first field to indicate the INSERT
-- operation at the source. For more information about how these settings
-- work together, see
-- <https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps Indicating Source DB Operations in Migrated S3 Data>
-- in the /Database Migration Service User Guide/.
--
-- DMS supports the interaction described preceding between the
-- @CdcInsertsOnly@ and @IncludeOpForFullLoad@ parameters in versions 3.1.4
-- and later.
--
-- @CdcInsertsOnly@ and @CdcInsertsAndUpdates@ can\'t both be set to @true@
-- for the same endpoint. Set either @CdcInsertsOnly@ or
-- @CdcInsertsAndUpdates@ to @true@ for the same endpoint, but not both.
--
-- 'cdcMaxBatchInterval', 's3Settings_cdcMaxBatchInterval' - Maximum length of the interval, defined in seconds, after which to
-- output a file to Amazon S3.
--
-- When @CdcMaxBatchInterval@ and @CdcMinFileSize@ are both specified, the
-- file write is triggered by whichever parameter condition is met first
-- within a DMS CloudFormation template.
--
-- The default value is 60 seconds.
--
-- 'cdcMinFileSize', 's3Settings_cdcMinFileSize' - Minimum file size, defined in kilobytes, to reach for a file output to
-- Amazon S3.
--
-- When @CdcMinFileSize@ and @CdcMaxBatchInterval@ are both specified, the
-- file write is triggered by whichever parameter condition is met first
-- within a DMS CloudFormation template.
--
-- The default value is 32 MB.
--
-- 'cdcPath', 's3Settings_cdcPath' - Specifies the folder path of CDC files. For an S3 source, this setting
-- is required if a task captures change data; otherwise, it\'s optional.
-- If @CdcPath@ is set, DMS reads CDC files from this path and replicates
-- the data changes to the target endpoint. For an S3 target if you set
-- <https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-PreserveTransactions PreserveTransactions>
-- to @true@, DMS verifies that you have set this parameter to a folder
-- path on your S3 target where DMS can save the transaction order for the
-- CDC load. DMS creates this CDC folder path in either your S3 target
-- working directory or the S3 target location specified by
-- <https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketFolder BucketFolder>
-- and
-- <https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketName BucketName>
-- .
--
-- For example, if you specify @CdcPath@ as @MyChangedData@, and you
-- specify @BucketName@ as @MyTargetBucket@ but do not specify
-- @BucketFolder@, DMS creates the following CDC folder path:
-- @MyTargetBucket\/MyChangedData@.
--
-- If you specify the same @CdcPath@, and you specify @BucketName@ as
-- @MyTargetBucket@ and @BucketFolder@ as @MyTargetData@, DMS creates the
-- following CDC folder path:
-- @MyTargetBucket\/MyTargetData\/MyChangedData@.
--
-- For more information on CDC including transaction order on an S3 target,
-- see
-- <https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.EndpointSettings.CdcPath Capturing data changes (CDC) including transaction order on the S3 target>.
--
-- This setting is supported in DMS versions 3.4.2 and later.
--
-- 'compressionType', 's3Settings_compressionType' - An optional parameter to use GZIP to compress the target files. Set to
-- GZIP to compress the target files. Either set this parameter to NONE
-- (the default) or don\'t use it to leave the files uncompressed. This
-- parameter applies to both .csv and .parquet file formats.
--
-- 'csvDelimiter', 's3Settings_csvDelimiter' - The delimiter used to separate columns in the .csv file for both source
-- and target. The default is a comma.
--
-- 'csvNoSupValue', 's3Settings_csvNoSupValue' - This setting only applies if your Amazon S3 output files during a change
-- data capture (CDC) load are written in .csv format. If
-- <https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-UseCsvNoSupValue UseCsvNoSupValue>
-- is set to true, specify a string value that you want DMS to use for all
-- columns not included in the supplemental log. If you do not specify a
-- string value, DMS uses the null value for these columns regardless of
-- the @UseCsvNoSupValue@ setting.
--
-- This setting is supported in DMS versions 3.4.1 and later.
--
-- 'csvNullValue', 's3Settings_csvNullValue' - An optional parameter that specifies how DMS treats null values. While
-- handling the null value, you can use this parameter to pass a
-- user-defined string as null when writing to the target. For example,
-- when target columns are not nullable, you can use this option to
-- differentiate between the empty string value and the null value. So, if
-- you set this parameter value to the empty string (\"\" or \'\'), DMS
-- treats the empty string as the null value instead of @NULL@.
--
-- The default value is @NULL@. Valid values include any valid string.
--
-- 'csvRowDelimiter', 's3Settings_csvRowDelimiter' - The delimiter used to separate rows in the .csv file for both source and
-- target. The default is a newline (@\\n@).
--
-- 'dataFormat', 's3Settings_dataFormat' - The format of the data that you want to use for output. You can choose
-- one of the following:
--
-- -   @csv@ : This is a row-based file format with comma-separated values
--     (.csv).
--
-- -   @parquet@ : Apache Parquet (.parquet) is a columnar storage file
--     format that features efficient compression and provides faster query
--     response.
--
-- 'dataPageSize', 's3Settings_dataPageSize' - The size of one data page in bytes. This parameter defaults to 1024 *
-- 1024 bytes (1 MiB). This number is used for .parquet file format only.
--
-- 'datePartitionDelimiter', 's3Settings_datePartitionDelimiter' - Specifies a date separating delimiter to use during folder partitioning.
-- The default value is @SLASH@. Use this parameter when
-- @DatePartitionedEnabled@ is set to @true@.
--
-- 'datePartitionEnabled', 's3Settings_datePartitionEnabled' - When set to @true@, this parameter partitions S3 bucket folders based on
-- transaction commit dates. The default value is @false@. For more
-- information about date-based folder partitioning, see
-- <https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.DatePartitioning Using date-based folder partitioning>.
--
-- 'datePartitionSequence', 's3Settings_datePartitionSequence' - Identifies the sequence of the date format to use during folder
-- partitioning. The default value is @YYYYMMDD@. Use this parameter when
-- @DatePartitionedEnabled@ is set to @true@.
--
-- 'datePartitionTimezone', 's3Settings_datePartitionTimezone' - When creating an S3 target endpoint, set @DatePartitionTimezone@ to
-- convert the current UTC time into a specified time zone. The conversion
-- occurs when a date partition folder is created and a CDC filename is
-- generated. The time zone format is Area\/Location. Use this parameter
-- when @DatePartitionedEnabled@ is set to @true@, as shown in the
-- following example.
--
-- @s3-settings=\'{\"DatePartitionEnabled\": true, \"DatePartitionSequence\": \"YYYYMMDDHH\", \"DatePartitionDelimiter\": \"SLASH\", \"DatePartitionTimezone\":\"@/@Asia\/Seoul@/@\", \"BucketName\": \"dms-nattarat-test\"}\'@
--
-- 'dictPageSizeLimit', 's3Settings_dictPageSizeLimit' - The maximum size of an encoded dictionary page of a column. If the
-- dictionary page exceeds this, this column is stored using an encoding
-- type of @PLAIN@. This parameter defaults to 1024 * 1024 bytes (1 MiB),
-- the maximum size of a dictionary page before it reverts to @PLAIN@
-- encoding. This size is used for .parquet file format only.
--
-- 'enableStatistics', 's3Settings_enableStatistics' - A value that enables statistics for Parquet pages and row groups. Choose
-- @true@ to enable statistics, @false@ to disable. Statistics include
-- @NULL@, @DISTINCT@, @MAX@, and @MIN@ values. This parameter defaults to
-- @true@. This value is used for .parquet file format only.
--
-- 'encodingType', 's3Settings_encodingType' - The type of encoding you are using:
--
-- -   @RLE_DICTIONARY@ uses a combination of bit-packing and run-length
--     encoding to store repeated values more efficiently. This is the
--     default.
--
-- -   @PLAIN@ doesn\'t use encoding at all. Values are stored as they are.
--
-- -   @PLAIN_DICTIONARY@ builds a dictionary of the values encountered in
--     a given column. The dictionary is stored in a dictionary page for
--     each column chunk.
--
-- 'encryptionMode', 's3Settings_encryptionMode' - The type of server-side encryption that you want to use for your data.
-- This encryption type is part of the endpoint settings or the extra
-- connections attributes for Amazon S3. You can choose either @SSE_S3@
-- (the default) or @SSE_KMS@.
--
-- For the @ModifyEndpoint@ operation, you can change the existing value of
-- the @EncryptionMode@ parameter from @SSE_KMS@ to @SSE_S3@. But you can’t
-- change the existing value from @SSE_S3@ to @SSE_KMS@.
--
-- To use @SSE_S3@, you need an Identity and Access Management (IAM) role
-- with permission to allow @\"arn:aws:s3:::dms-*\"@ to use the following
-- actions:
--
-- -   @s3:CreateBucket@
--
-- -   @s3:ListBucket@
--
-- -   @s3:DeleteBucket@
--
-- -   @s3:GetBucketLocation@
--
-- -   @s3:GetObject@
--
-- -   @s3:PutObject@
--
-- -   @s3:DeleteObject@
--
-- -   @s3:GetObjectVersion@
--
-- -   @s3:GetBucketPolicy@
--
-- -   @s3:PutBucketPolicy@
--
-- -   @s3:DeleteBucketPolicy@
--
-- 'expectedBucketOwner', 's3Settings_expectedBucketOwner' - To specify a bucket owner and prevent sniping, you can use the
-- @ExpectedBucketOwner@ endpoint setting.
--
-- Example:
-- @--s3-settings=\'{\"ExpectedBucketOwner\": \"@/@AWS_Account_ID@/@\"}\'@
--
-- When you make a request to test a connection or perform a migration, S3
-- checks the account ID of the bucket owner against the specified
-- parameter.
--
-- 'externalTableDefinition', 's3Settings_externalTableDefinition' - Specifies how tables are defined in the S3 source files only.
--
-- 'ignoreHeaderRows', 's3Settings_ignoreHeaderRows' - When this value is set to 1, DMS ignores the first row header in a .csv
-- file. A value of 1 turns on the feature; a value of 0 turns off the
-- feature.
--
-- The default is 0.
--
-- 'includeOpForFullLoad', 's3Settings_includeOpForFullLoad' - A value that enables a full load to write INSERT operations to the
-- comma-separated value (.csv) output files only to indicate how the rows
-- were added to the source database.
--
-- DMS supports the @IncludeOpForFullLoad@ parameter in versions 3.1.4 and
-- later.
--
-- For full load, records can only be inserted. By default (the @false@
-- setting), no information is recorded in these output files for a full
-- load to indicate that the rows were inserted at the source database. If
-- @IncludeOpForFullLoad@ is set to @true@ or @y@, the INSERT is recorded
-- as an I annotation in the first field of the .csv file. This allows the
-- format of your target records from a full load to be consistent with the
-- target records from a CDC load.
--
-- This setting works together with the @CdcInsertsOnly@ and the
-- @CdcInsertsAndUpdates@ parameters for output to .csv files only. For
-- more information about how these settings work together, see
-- <https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps Indicating Source DB Operations in Migrated S3 Data>
-- in the /Database Migration Service User Guide/.
--
-- 'maxFileSize', 's3Settings_maxFileSize' - A value that specifies the maximum size (in KB) of any .csv file to be
-- created while migrating to an S3 target during full load.
--
-- The default value is 1,048,576 KB (1 GB). Valid values include 1 to
-- 1,048,576.
--
-- 'parquetTimestampInMillisecond', 's3Settings_parquetTimestampInMillisecond' - A value that specifies the precision of any @TIMESTAMP@ column values
-- that are written to an Amazon S3 object file in .parquet format.
--
-- DMS supports the @ParquetTimestampInMillisecond@ parameter in versions
-- 3.1.4 and later.
--
-- When @ParquetTimestampInMillisecond@ is set to @true@ or @y@, DMS writes
-- all @TIMESTAMP@ columns in a .parquet formatted file with millisecond
-- precision. Otherwise, DMS writes them with microsecond precision.
--
-- Currently, Amazon Athena and Glue can handle only millisecond precision
-- for @TIMESTAMP@ values. Set this parameter to @true@ for S3 endpoint
-- object files that are .parquet formatted only if you plan to query or
-- process the data with Athena or Glue.
--
-- DMS writes any @TIMESTAMP@ column values written to an S3 file in .csv
-- format with microsecond precision.
--
-- Setting @ParquetTimestampInMillisecond@ has no effect on the string
-- format of the timestamp column value that is inserted by setting the
-- @TimestampColumnName@ parameter.
--
-- 'parquetVersion', 's3Settings_parquetVersion' - The version of the Apache Parquet format that you want to use:
-- @parquet_1_0@ (the default) or @parquet_2_0@.
--
-- 'preserveTransactions', 's3Settings_preserveTransactions' - If set to @true@, DMS saves the transaction order for a change data
-- capture (CDC) load on the Amazon S3 target specified by
-- <https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-CdcPath CdcPath>
-- . For more information, see
-- <https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.EndpointSettings.CdcPath Capturing data changes (CDC) including transaction order on the S3 target>.
--
-- This setting is supported in DMS versions 3.4.2 and later.
--
-- 'rfc4180', 's3Settings_rfc4180' - For an S3 source, when this value is set to @true@ or @y@, each leading
-- double quotation mark has to be followed by an ending double quotation
-- mark. This formatting complies with RFC 4180. When this value is set to
-- @false@ or @n@, string literals are copied to the target as is. In this
-- case, a delimiter (row or column) signals the end of the field. Thus,
-- you can\'t use a delimiter as part of the string, because it signals the
-- end of the value.
--
-- For an S3 target, an optional parameter used to set behavior to comply
-- with RFC 4180 for data migrated to Amazon S3 using .csv file format
-- only. When this value is set to @true@ or @y@ using Amazon S3 as a
-- target, if the data has quotation marks or newline characters in it, DMS
-- encloses the entire column with an additional pair of double quotation
-- marks (\"). Every quotation mark within the data is repeated twice.
--
-- The default value is @true@. Valid values include @true@, @false@, @y@,
-- and @n@.
--
-- 'rowGroupLength', 's3Settings_rowGroupLength' - The number of rows in a row group. A smaller row group size provides
-- faster reads. But as the number of row groups grows, the slower the
-- writes become. This parameter defaults to 10,000 rows. This number is used for
-- .parquet file format only.
--
-- If you choose a value larger than the maximum, @RowGroupLength@ is set
-- to the max row group length in bytes (64 * 1024 * 1024).
--
-- 'serverSideEncryptionKmsKeyId', 's3Settings_serverSideEncryptionKmsKeyId' - If you are using @SSE_KMS@ for the @EncryptionMode@, provide the KMS key
-- ID. The key that you use needs an attached policy that enables Identity
-- and Access Management (IAM) user permissions and allows use of the key.
--
-- Here is a CLI example:
-- @aws dms create-endpoint --endpoint-identifier @/@value@/@ --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=@/@value@/@,BucketFolder=@/@value@/@,BucketName=@/@value@/@,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=@/@value@/@ @
--
-- 'serviceAccessRoleArn', 's3Settings_serviceAccessRoleArn' - The Amazon Resource Name (ARN) used by the service to access the IAM
-- role. The role must allow the @iam:PassRole@ action. It is a required
-- parameter that enables DMS to write and read objects from an S3 bucket.
--
-- 'timestampColumnName', 's3Settings_timestampColumnName' - A value that when nonblank causes DMS to add a column with timestamp
-- information to the endpoint data for an Amazon S3 target.
--
-- DMS supports the @TimestampColumnName@ parameter in versions 3.1.4 and
-- later.
--
-- DMS includes an additional @STRING@ column in the .csv or .parquet
-- object files of your migrated data when you set @TimestampColumnName@ to
-- a nonblank value.
--
-- For a full load, each row of this timestamp column contains a timestamp
-- for when the data was transferred from the source to the target by DMS.
--
-- For a change data capture (CDC) load, each row of the timestamp column
-- contains the timestamp for the commit of that row in the source
-- database.
--
-- The string format for this timestamp column value is
-- @yyyy-MM-dd HH:mm:ss.SSSSSS@. By default, the precision of this value is
-- in microseconds. For a CDC load, the rounding of the precision depends
-- on the commit timestamp supported by DMS for the source database.
--
-- When the @AddColumnName@ parameter is set to @true@, DMS also includes a
-- name for the timestamp column that you set with @TimestampColumnName@.
--
-- 'useCsvNoSupValue', 's3Settings_useCsvNoSupValue' - This setting applies if the S3 output files during a change data capture
-- (CDC) load are written in .csv format. If set to @true@ for columns not
-- included in the supplemental log, DMS uses the value specified by
-- <https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-CsvNoSupValue CsvNoSupValue>
-- . If not set or set to @false@, DMS uses the null value for these
-- columns.
--
-- This setting is supported in DMS versions 3.4.1 and later.
--
-- 'useTaskStartTimeForFullLoadTimestamp', 's3Settings_useTaskStartTimeForFullLoadTimestamp' - When set to @true@, this parameter uses the task start time as the
-- timestamp column value instead of the time the data is written to the
-- target.
-- For full load, when @useTaskStartTimeForFullLoadTimestamp@ is set to
-- @true@, each row of the timestamp column contains the task start time.
-- For CDC loads, each row of the timestamp column contains the transaction
-- commit time.
--
-- When @useTaskStartTimeForFullLoadTimestamp@ is set to @false@, the full
-- load timestamp in the timestamp column increments with the time data
-- arrives at the target.
newS3Settings ::
  S3Settings
newS3Settings =
  S3Settings'
    { addColumnName = Prelude.Nothing,
      addTrailingPaddingCharacter = Prelude.Nothing,
      bucketFolder = Prelude.Nothing,
      bucketName = Prelude.Nothing,
      cannedAclForObjects = Prelude.Nothing,
      cdcInsertsAndUpdates = Prelude.Nothing,
      cdcInsertsOnly = Prelude.Nothing,
      cdcMaxBatchInterval = Prelude.Nothing,
      cdcMinFileSize = Prelude.Nothing,
      cdcPath = Prelude.Nothing,
      compressionType = Prelude.Nothing,
      csvDelimiter = Prelude.Nothing,
      csvNoSupValue = Prelude.Nothing,
      csvNullValue = Prelude.Nothing,
      csvRowDelimiter = Prelude.Nothing,
      dataFormat = Prelude.Nothing,
      dataPageSize = Prelude.Nothing,
      datePartitionDelimiter = Prelude.Nothing,
      datePartitionEnabled = Prelude.Nothing,
      datePartitionSequence = Prelude.Nothing,
      datePartitionTimezone = Prelude.Nothing,
      dictPageSizeLimit = Prelude.Nothing,
      enableStatistics = Prelude.Nothing,
      encodingType = Prelude.Nothing,
      encryptionMode = Prelude.Nothing,
      expectedBucketOwner = Prelude.Nothing,
      externalTableDefinition = Prelude.Nothing,
      ignoreHeaderRows = Prelude.Nothing,
      includeOpForFullLoad = Prelude.Nothing,
      maxFileSize = Prelude.Nothing,
      parquetTimestampInMillisecond = Prelude.Nothing,
      parquetVersion = Prelude.Nothing,
      preserveTransactions = Prelude.Nothing,
      rfc4180 = Prelude.Nothing,
      rowGroupLength = Prelude.Nothing,
      serverSideEncryptionKmsKeyId = Prelude.Nothing,
      serviceAccessRoleArn = Prelude.Nothing,
      timestampColumnName = Prelude.Nothing,
      useCsvNoSupValue = Prelude.Nothing,
      useTaskStartTimeForFullLoadTimestamp = Prelude.Nothing
    }

-- | An optional parameter that, when set to @true@ or @y@, you can use to
-- add column name information to the .csv output file.
--
-- The default value is @false@. Valid values are @true@, @false@, @y@, and
-- @n@.
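--
-- A usage sketch: the @(?~)@ and @(^.)@ operators below are assumed to come
-- from the @lens@ (or @microlens@) package, which this module does not
-- re-export:
--
-- > import Control.Lens ((?~), (^.))
-- >
-- > -- Turn on column headers in the generated .csv files.
-- > withColumnHeaders :: S3Settings -> S3Settings
-- > withColumnHeaders = s3Settings_addColumnName ?~ Prelude.True
-- >
-- > -- Read the current value; Nothing means the service default applies.
-- > columnHeaderFlag :: S3Settings -> Prelude.Maybe Prelude.Bool
-- > columnHeaderFlag settings = settings ^. s3Settings_addColumnName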
s3Settings_addColumnName :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Bool)
s3Settings_addColumnName = Lens.lens (\S3Settings' {addColumnName} -> addColumnName) (\s@S3Settings' {} a -> s {addColumnName = a} :: S3Settings)

-- | Use the S3 target endpoint setting @AddTrailingPaddingCharacter@ to add
-- padding on string data. The default value is @false@.
s3Settings_addTrailingPaddingCharacter :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Bool)
s3Settings_addTrailingPaddingCharacter = Lens.lens (\S3Settings' {addTrailingPaddingCharacter} -> addTrailingPaddingCharacter) (\s@S3Settings' {} a -> s {addTrailingPaddingCharacter = a} :: S3Settings)

-- | An optional parameter to set a folder name in the S3 bucket. If
-- provided, tables are created in the path
-- @ @/@bucketFolder@/@\/@/@schema_name@/@\/@/@table_name@/@\/@. If this
-- parameter isn\'t specified, then the path used is
-- @ @/@schema_name@/@\/@/@table_name@/@\/@.
s3Settings_bucketFolder :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Text)
s3Settings_bucketFolder = Lens.lens (\S3Settings' {bucketFolder} -> bucketFolder) (\s@S3Settings' {} a -> s {bucketFolder = a} :: S3Settings)

-- | The name of the S3 bucket.
s3Settings_bucketName :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Text)
s3Settings_bucketName = Lens.lens (\S3Settings' {bucketName} -> bucketName) (\s@S3Settings' {} a -> s {bucketName = a} :: S3Settings)

-- | A value that enables DMS to specify a predefined (canned) access control
-- list for objects created in an Amazon S3 bucket as .csv or .parquet
-- files. For more information about Amazon S3 canned ACLs, see
-- <http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl Canned ACL>
-- in the /Amazon S3 Developer Guide./
--
-- The default value is NONE. Valid values include NONE, PRIVATE,
-- PUBLIC_READ, PUBLIC_READ_WRITE, AUTHENTICATED_READ, AWS_EXEC_READ,
-- BUCKET_OWNER_READ, and BUCKET_OWNER_FULL_CONTROL.
s3Settings_cannedAclForObjects :: Lens.Lens' S3Settings (Prelude.Maybe CannedAclForObjectsValue)
s3Settings_cannedAclForObjects = Lens.lens (\S3Settings' {cannedAclForObjects} -> cannedAclForObjects) (\s@S3Settings' {} a -> s {cannedAclForObjects = a} :: S3Settings)

-- | A value that enables a change data capture (CDC) load to write INSERT
-- and UPDATE operations to .csv or .parquet (columnar storage) output
-- files. The default setting is @false@, but when @CdcInsertsAndUpdates@
-- is set to @true@ or @y@, only INSERTs and UPDATEs from the source
-- database are migrated to the .csv or .parquet file.
--
-- For .csv file format only, how these INSERTs and UPDATEs are recorded
-- depends on the value of the @IncludeOpForFullLoad@ parameter. If
-- @IncludeOpForFullLoad@ is set to @true@, the first field of every CDC
-- record is set to either @I@ or @U@ to indicate INSERT and UPDATE
-- operations at the source. But if @IncludeOpForFullLoad@ is set to
-- @false@, CDC records are written without an indication of INSERT or
-- UPDATE operations at the source. For more information about how these
-- settings work together, see
-- <https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps Indicating Source DB Operations in Migrated S3 Data>
-- in the /Database Migration Service User Guide/.
--
-- DMS supports the use of the @CdcInsertsAndUpdates@ parameter in versions
-- 3.3.1 and later.
--
-- @CdcInsertsOnly@ and @CdcInsertsAndUpdates@ can\'t both be set to @true@
-- for the same endpoint. Set either @CdcInsertsOnly@ or
-- @CdcInsertsAndUpdates@ to @true@ for the same endpoint, but not both.
s3Settings_cdcInsertsAndUpdates :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Bool)
s3Settings_cdcInsertsAndUpdates = Lens.lens (\S3Settings' {cdcInsertsAndUpdates} -> cdcInsertsAndUpdates) (\s@S3Settings' {} a -> s {cdcInsertsAndUpdates = a} :: S3Settings)

-- | A value that enables a change data capture (CDC) load to write only
-- INSERT operations to .csv or columnar storage (.parquet) output files.
-- By default (the @false@ setting), the first field in a .csv or .parquet
-- record contains the letter I (INSERT), U (UPDATE), or D (DELETE). These
-- values indicate whether the row was inserted, updated, or deleted at the
-- source database for a CDC load to the target.
--
-- If @CdcInsertsOnly@ is set to @true@ or @y@, only INSERTs from the
-- source database are migrated to the .csv or .parquet file. For .csv
-- format only, how these INSERTs are recorded depends on the value of
-- @IncludeOpForFullLoad@. If @IncludeOpForFullLoad@ is set to @true@, the
-- first field of every CDC record is set to I to indicate the INSERT
-- operation at the source. If @IncludeOpForFullLoad@ is set to @false@,
-- every CDC record is written without a first field to indicate the INSERT
-- operation at the source. For more information about how these settings
-- work together, see
-- <https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps Indicating Source DB Operations in Migrated S3 Data>
-- in the /Database Migration Service User Guide/.
--
-- DMS supports the interaction described preceding between the
-- @CdcInsertsOnly@ and @IncludeOpForFullLoad@ parameters in versions 3.1.4
-- and later.
--
-- @CdcInsertsOnly@ and @CdcInsertsAndUpdates@ can\'t both be set to @true@
-- for the same endpoint. Set either @CdcInsertsOnly@ or
-- @CdcInsertsAndUpdates@ to @true@ for the same endpoint, but not both.
s3Settings_cdcInsertsOnly :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Bool)
s3Settings_cdcInsertsOnly = Lens.lens (\S3Settings' {cdcInsertsOnly} -> cdcInsertsOnly) (\s@S3Settings' {} a -> s {cdcInsertsOnly = a} :: S3Settings)

-- | Maximum length of the interval, defined in seconds, after which to
-- output a file to Amazon S3.
--
-- When @CdcMaxBatchInterval@ and @CdcMinFileSize@ are both specified, the
-- file write is triggered by whichever parameter condition is met first
-- within a DMS CloudFormation template.
--
-- The default value is 60 seconds.
s3Settings_cdcMaxBatchInterval :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Int)
s3Settings_cdcMaxBatchInterval = Lens.lens (\S3Settings' {cdcMaxBatchInterval} -> cdcMaxBatchInterval) (\s@S3Settings' {} a -> s {cdcMaxBatchInterval = a} :: S3Settings)

-- | Minimum file size, defined in kilobytes, to reach for a file output to
-- Amazon S3.
--
-- When @CdcMinFileSize@ and @CdcMaxBatchInterval@ are both specified, the
-- file write is triggered by whichever parameter condition is met first
-- within a DMS CloudFormation template.
--
-- The default value is 32 MB.
s3Settings_cdcMinFileSize :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Int)
s3Settings_cdcMinFileSize = Lens.lens (\S3Settings' {cdcMinFileSize} -> cdcMinFileSize) (\s@S3Settings' {} a -> s {cdcMinFileSize = a} :: S3Settings)
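
-- A small sketch (not generated code) that sets both CDC batching knobs at
-- once; per the notes above, whichever threshold is reached first triggers
-- the file write. The values are hypothetical.
exampleCdcBatching :: S3Settings
exampleCdcBatching =
  newS3Settings
    { cdcMaxBatchInterval = Prelude.Just 120, -- seconds
      cdcMinFileSize = Prelude.Just 64000 -- kilobytes
    }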

-- | Specifies the folder path of CDC files. For an S3 source, this setting
-- is required if a task captures change data; otherwise, it\'s optional.
-- If @CdcPath@ is set, DMS reads CDC files from this path and replicates
-- the data changes to the target endpoint. For an S3 target if you set
-- <https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-PreserveTransactions PreserveTransactions>
-- to @true@, DMS verifies that you have set this parameter to a folder
-- path on your S3 target where DMS can save the transaction order for the
-- CDC load. DMS creates this CDC folder path in either your S3 target
-- working directory or the S3 target location specified by
-- <https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketFolder BucketFolder>
-- and
-- <https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-BucketName BucketName>
-- .
--
-- For example, if you specify @CdcPath@ as @MyChangedData@, and you
-- specify @BucketName@ as @MyTargetBucket@ but do not specify
-- @BucketFolder@, DMS creates the following CDC folder path:
-- @MyTargetBucket\/MyChangedData@.
--
-- If you specify the same @CdcPath@, and you specify @BucketName@ as
-- @MyTargetBucket@ and @BucketFolder@ as @MyTargetData@, DMS creates the
-- following CDC folder path:
-- @MyTargetBucket\/MyTargetData\/MyChangedData@.
--
-- For more information on CDC including transaction order on an S3 target,
-- see
-- <https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.EndpointSettings.CdcPath Capturing data changes (CDC) including transaction order on the S3 target>.
--
-- This setting is supported in DMS versions 3.4.2 and later.
s3Settings_cdcPath :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Text)
s3Settings_cdcPath = Lens.lens (\S3Settings' {cdcPath} -> cdcPath) (\s@S3Settings' {} a -> s {cdcPath = a} :: S3Settings)
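
-- A sketch (not generated code) mirroring the second path example above:
-- with these hypothetical values, DMS would write ordered CDC data under
-- MyTargetBucket/MyTargetData/MyChangedData.
exampleCdcPathSettings :: S3Settings
exampleCdcPathSettings =
  newS3Settings
    { bucketName = Prelude.Just "MyTargetBucket",
      bucketFolder = Prelude.Just "MyTargetData",
      cdcPath = Prelude.Just "MyChangedData",
      preserveTransactions = Prelude.Just Prelude.True
    }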

-- | An optional parameter to use GZIP to compress the target files. Set to
-- GZIP to compress the target files. Either set this parameter to NONE
-- (the default) or don\'t use it to leave the files uncompressed. This
-- parameter applies to both .csv and .parquet file formats.
s3Settings_compressionType :: Lens.Lens' S3Settings (Prelude.Maybe CompressionTypeValue)
s3Settings_compressionType = Lens.lens (\S3Settings' {compressionType} -> compressionType) (\s@S3Settings' {} a -> s {compressionType = a} :: S3Settings)

-- | The delimiter used to separate columns in the .csv file for both source
-- and target. The default is a comma.
s3Settings_csvDelimiter :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Text)
s3Settings_csvDelimiter = Lens.lens (\S3Settings' {csvDelimiter} -> csvDelimiter) (\s@S3Settings' {} a -> s {csvDelimiter = a} :: S3Settings)

-- | This setting only applies if your Amazon S3 output files during a change
-- data capture (CDC) load are written in .csv format. If
-- <https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-UseCsvNoSupValue UseCsvNoSupValue>
-- is set to true, specify a string value that you want DMS to use for all
-- columns not included in the supplemental log. If you do not specify a
-- string value, DMS uses the null value for these columns regardless of
-- the @UseCsvNoSupValue@ setting.
--
-- This setting is supported in DMS versions 3.4.1 and later.
s3Settings_csvNoSupValue :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Text)
s3Settings_csvNoSupValue = Lens.lens (\S3Settings' {csvNoSupValue} -> csvNoSupValue) (\s@S3Settings' {} a -> s {csvNoSupValue = a} :: S3Settings)

-- | An optional parameter that specifies how DMS treats null values. While
-- handling the null value, you can use this parameter to pass a
-- user-defined string as null when writing to the target. For example,
-- when target columns are not nullable, you can use this option to
-- differentiate between the empty string value and the null value. So, if
-- you set this parameter value to the empty string (\"\" or \'\'), DMS
-- treats the empty string as the null value instead of @NULL@.
--
-- The default value is @NULL@. Valid values include any valid string.
s3Settings_csvNullValue :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Text)
s3Settings_csvNullValue = Lens.lens (\S3Settings' {csvNullValue} -> csvNullValue) (\s@S3Settings' {} a -> s {csvNullValue = a} :: S3Settings)

-- | The delimiter used to separate rows in the .csv file for both source and
-- target. The default is a newline (@\\n@).
s3Settings_csvRowDelimiter :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Text)
s3Settings_csvRowDelimiter = Lens.lens (\S3Settings' {csvRowDelimiter} -> csvRowDelimiter) (\s@S3Settings' {} a -> s {csvRowDelimiter = a} :: S3Settings)

-- | The format of the data that you want to use for output. You can choose
-- one of the following:
--
-- -   @csv@ : This is a row-based file format with comma-separated values
--     (.csv).
--
-- -   @parquet@ : Apache Parquet (.parquet) is a columnar storage file
--     format that features efficient compression and provides faster query
--     response.
s3Settings_dataFormat :: Lens.Lens' S3Settings (Prelude.Maybe DataFormatValue)
s3Settings_dataFormat = Lens.lens (\S3Settings' {dataFormat} -> dataFormat) (\s@S3Settings' {} a -> s {dataFormat = a} :: S3Settings)

-- | The size of one data page in bytes. This parameter defaults to 1024 *
-- 1024 bytes (1 MiB). This number is used for .parquet file format only.
s3Settings_dataPageSize :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Int)
s3Settings_dataPageSize = Lens.lens (\S3Settings' {dataPageSize} -> dataPageSize) (\s@S3Settings' {} a -> s {dataPageSize = a} :: S3Settings)

-- | Specifies a date separating delimiter to use during folder partitioning.
-- The default value is @SLASH@. Use this parameter when
-- @DatePartitionEnabled@ is set to @true@.
s3Settings_datePartitionDelimiter :: Lens.Lens' S3Settings (Prelude.Maybe DatePartitionDelimiterValue)
s3Settings_datePartitionDelimiter = Lens.lens (\S3Settings' {datePartitionDelimiter} -> datePartitionDelimiter) (\s@S3Settings' {} a -> s {datePartitionDelimiter = a} :: S3Settings)

-- | When set to @true@, this parameter partitions S3 bucket folders based on
-- transaction commit dates. The default value is @false@. For more
-- information about date-based folder partitioning, see
-- <https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.DatePartitioning Using date-based folder partitioning>.
s3Settings_datePartitionEnabled :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Bool)
s3Settings_datePartitionEnabled = Lens.lens (\S3Settings' {datePartitionEnabled} -> datePartitionEnabled) (\s@S3Settings' {} a -> s {datePartitionEnabled = a} :: S3Settings)

-- | Identifies the sequence of the date format to use during folder
-- partitioning. The default value is @YYYYMMDD@. Use this parameter when
-- @DatePartitionEnabled@ is set to @true@.
s3Settings_datePartitionSequence :: Lens.Lens' S3Settings (Prelude.Maybe DatePartitionSequenceValue)
s3Settings_datePartitionSequence = Lens.lens (\S3Settings' {datePartitionSequence} -> datePartitionSequence) (\s@S3Settings' {} a -> s {datePartitionSequence = a} :: S3Settings)

-- | When creating an S3 target endpoint, set @DatePartitionTimezone@ to
-- convert the current UTC time into a specified time zone. The conversion
-- occurs when a date partition folder is created and a CDC filename is
-- generated. The time zone format is Area\/Location. Use this parameter
-- when @DatePartitionEnabled@ is set to @true@, as shown in the
-- following example.
--
-- @s3-settings=\'{\"DatePartitionEnabled\": true, \"DatePartitionSequence\": \"YYYYMMDDHH\", \"DatePartitionDelimiter\": \"SLASH\", \"DatePartitionTimezone\":\"@/@Asia\/Seoul@/@\", \"BucketName\": \"dms-nattarat-test\"}\'@
s3Settings_datePartitionTimezone :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Text)
s3Settings_datePartitionTimezone = Lens.lens (\S3Settings' {datePartitionTimezone} -> datePartitionTimezone) (\s@S3Settings' {} a -> s {datePartitionTimezone = a} :: S3Settings)
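
-- A sketch (not generated code) of the date-partitioning JSON example above
-- expressed through this record. The sequence and delimiter enums are left
-- at their defaults (@YYYYMMDD@ and @SLASH@) to avoid naming constructors
-- from other modules here.
exampleDatePartitioning :: S3Settings
exampleDatePartitioning =
  newS3Settings
    { datePartitionEnabled = Prelude.Just Prelude.True,
      datePartitionTimezone = Prelude.Just "Asia/Seoul",
      bucketName = Prelude.Just "dms-nattarat-test"
    }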

-- | The maximum size of an encoded dictionary page of a column. If the
-- dictionary page exceeds this, this column is stored using an encoding
-- type of @PLAIN@. This parameter defaults to 1024 * 1024 bytes (1 MiB),
-- the maximum size of a dictionary page before it reverts to @PLAIN@
-- encoding. This size is used for .parquet file format only.
s3Settings_dictPageSizeLimit :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Int)
s3Settings_dictPageSizeLimit = Lens.lens (\S3Settings' {dictPageSizeLimit} -> dictPageSizeLimit) (\s@S3Settings' {} a -> s {dictPageSizeLimit = a} :: S3Settings)

-- | A value that enables statistics for Parquet pages and row groups. Choose
-- @true@ to enable statistics, @false@ to disable. Statistics include
-- @NULL@, @DISTINCT@, @MAX@, and @MIN@ values. This parameter defaults to
-- @true@. This value is used for .parquet file format only.
s3Settings_enableStatistics :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Bool)
s3Settings_enableStatistics = Lens.lens (\S3Settings' {enableStatistics} -> enableStatistics) (\s@S3Settings' {} a -> s {enableStatistics = a} :: S3Settings)

-- | The type of encoding you are using:
--
-- -   @RLE_DICTIONARY@ uses a combination of bit-packing and run-length
--     encoding to store repeated values more efficiently. This is the
--     default.
--
-- -   @PLAIN@ doesn\'t use encoding at all. Values are stored as they are.
--
-- -   @PLAIN_DICTIONARY@ builds a dictionary of the values encountered in
--     a given column. The dictionary is stored in a dictionary page for
--     each column chunk.
s3Settings_encodingType :: Lens.Lens' S3Settings (Prelude.Maybe EncodingTypeValue)
s3Settings_encodingType = Lens.lens (\S3Settings' {encodingType} -> encodingType) (\s@S3Settings' {} a -> s {encodingType = a} :: S3Settings)

-- | The type of server-side encryption that you want to use for your data.
-- This encryption type is part of the endpoint settings or the extra
-- connections attributes for Amazon S3. You can choose either @SSE_S3@
-- (the default) or @SSE_KMS@.
--
-- For the @ModifyEndpoint@ operation, you can change the existing value of
-- the @EncryptionMode@ parameter from @SSE_KMS@ to @SSE_S3@. But you can’t
-- change the existing value from @SSE_S3@ to @SSE_KMS@.
--
-- To use @SSE_S3@, you need an Identity and Access Management (IAM) role
-- with permission to allow @\"arn:aws:s3:::dms-*\"@ to use the following
-- actions:
--
-- -   @s3:CreateBucket@
--
-- -   @s3:ListBucket@
--
-- -   @s3:DeleteBucket@
--
-- -   @s3:GetBucketLocation@
--
-- -   @s3:GetObject@
--
-- -   @s3:PutObject@
--
-- -   @s3:DeleteObject@
--
-- -   @s3:GetObjectVersion@
--
-- -   @s3:GetBucketPolicy@
--
-- -   @s3:PutBucketPolicy@
--
-- -   @s3:DeleteBucketPolicy@
s3Settings_encryptionMode :: Lens.Lens' S3Settings (Prelude.Maybe EncryptionModeValue)
s3Settings_encryptionMode = Lens.lens (\S3Settings' {encryptionMode} -> encryptionMode) (\s@S3Settings' {} a -> s {encryptionMode = a} :: S3Settings)

-- | To specify a bucket owner and prevent sniping, you can use the
-- @ExpectedBucketOwner@ endpoint setting.
--
-- Example:
-- @--s3-settings=\'{\"ExpectedBucketOwner\": \"@/@AWS_Account_ID@/@\"}\'@
--
-- When you make a request to test a connection or perform a migration, S3
-- checks the account ID of the bucket owner against the specified
-- parameter.
s3Settings_expectedBucketOwner :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Text)
s3Settings_expectedBucketOwner = Lens.lens (\S3Settings' {expectedBucketOwner} -> expectedBucketOwner) (\s@S3Settings' {} a -> s {expectedBucketOwner = a} :: S3Settings)

-- | Specifies how tables are defined in the S3 source files only.
s3Settings_externalTableDefinition :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Text)
s3Settings_externalTableDefinition = Lens.lens (\S3Settings' {externalTableDefinition} -> externalTableDefinition) (\s@S3Settings' {} a -> s {externalTableDefinition = a} :: S3Settings)

-- | When this value is set to 1, DMS ignores the first row header in a .csv
-- file. A value of 1 turns on the feature; a value of 0 turns off the
-- feature.
--
-- The default is 0.
s3Settings_ignoreHeaderRows :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Int)
s3Settings_ignoreHeaderRows = Lens.lens (\S3Settings' {ignoreHeaderRows} -> ignoreHeaderRows) (\s@S3Settings' {} a -> s {ignoreHeaderRows = a} :: S3Settings)

-- | A value that enables a full load to write INSERT operations to the
-- comma-separated value (.csv) output files only to indicate how the rows
-- were added to the source database.
--
-- DMS supports the @IncludeOpForFullLoad@ parameter in versions 3.1.4 and
-- later.
--
-- For full load, records can only be inserted. By default (the @false@
-- setting), no information is recorded in these output files for a full
-- load to indicate that the rows were inserted at the source database. If
-- @IncludeOpForFullLoad@ is set to @true@ or @y@, the INSERT is recorded
-- as an I annotation in the first field of the .csv file. This allows the
-- format of your target records from a full load to be consistent with the
-- target records from a CDC load.
--
-- This setting works together with the @CdcInsertsOnly@ and the
-- @CdcInsertsAndUpdates@ parameters for output to .csv files only. For
-- more information about how these settings work together, see
-- <https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring.InsertOps Indicating Source DB Operations in Migrated S3 Data>
-- in the /Database Migration Service User Guide/.
s3Settings_includeOpForFullLoad :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Bool)
s3Settings_includeOpForFullLoad = Lens.lens (\S3Settings' {includeOpForFullLoad} -> includeOpForFullLoad) (\s@S3Settings' {} a -> s {includeOpForFullLoad = a} :: S3Settings)
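
-- Illustration only: with @IncludeOpForFullLoad@ set to @true@, .csv rows
-- carry the operation letter described above in their first field. The row
-- payloads below are hypothetical.
--
-- > I,101,"row written by the full load"
-- > U,101,"same row updated later during CDC"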

-- | A value that specifies the maximum size (in KB) of any .csv file to be
-- created while migrating to an S3 target during full load.
--
-- The default value is 1,048,576 KB (1 GB). Valid values include 1 to
-- 1,048,576.
s3Settings_maxFileSize :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Int)
s3Settings_maxFileSize = Lens.lens (\S3Settings' {maxFileSize} -> maxFileSize) (\s@S3Settings' {} a -> s {maxFileSize = a} :: S3Settings)

-- | A value that specifies the precision of any @TIMESTAMP@ column values
-- that are written to an Amazon S3 object file in .parquet format.
--
-- DMS supports the @ParquetTimestampInMillisecond@ parameter in versions
-- 3.1.4 and later.
--
-- When @ParquetTimestampInMillisecond@ is set to @true@ or @y@, DMS writes
-- all @TIMESTAMP@ columns in a .parquet formatted file with millisecond
-- precision. Otherwise, DMS writes them with microsecond precision.
--
-- Currently, Amazon Athena and Glue can handle only millisecond precision
-- for @TIMESTAMP@ values. Set this parameter to @true@ for S3 endpoint
-- object files that are .parquet formatted only if you plan to query or
-- process the data with Athena or Glue.
--
-- DMS writes any @TIMESTAMP@ column values written to an S3 file in .csv
-- format with microsecond precision.
--
-- Setting @ParquetTimestampInMillisecond@ has no effect on the string
-- format of the timestamp column value that is inserted by setting the
-- @TimestampColumnName@ parameter.
s3Settings_parquetTimestampInMillisecond :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Bool)
s3Settings_parquetTimestampInMillisecond = Lens.lens (\S3Settings' {parquetTimestampInMillisecond} -> parquetTimestampInMillisecond) (\s@S3Settings' {} a -> s {parquetTimestampInMillisecond = a} :: S3Settings)

-- | The version of the Apache Parquet format that you want to use:
-- @parquet_1_0@ (the default) or @parquet_2_0@.
s3Settings_parquetVersion :: Lens.Lens' S3Settings (Prelude.Maybe ParquetVersionValue)
s3Settings_parquetVersion = Lens.lens (\S3Settings' {parquetVersion} -> parquetVersion) (\s@S3Settings' {} a -> s {parquetVersion = a} :: S3Settings)

-- | If set to @true@, DMS saves the transaction order for a change data
-- capture (CDC) load on the Amazon S3 target specified by
-- <https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-CdcPath CdcPath>
-- . For more information, see
-- <https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.EndpointSettings.CdcPath Capturing data changes (CDC) including transaction order on the S3 target>.
--
-- This setting is supported in DMS versions 3.4.2 and later.
s3Settings_preserveTransactions :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Bool)
s3Settings_preserveTransactions = Lens.lens (\S3Settings' {preserveTransactions} -> preserveTransactions) (\s@S3Settings' {} a -> s {preserveTransactions = a} :: S3Settings)

-- | For an S3 source, when this value is set to @true@ or @y@, each leading
-- double quotation mark has to be followed by an ending double quotation
-- mark. This formatting complies with RFC 4180. When this value is set to
-- @false@ or @n@, string literals are copied to the target as is. In this
-- case, a delimiter (row or column) signals the end of the field. Thus,
-- you can\'t use a delimiter as part of the string, because it signals the
-- end of the value.
--
-- For an S3 target, an optional parameter used to set behavior to comply
-- with RFC 4180 for data migrated to Amazon S3 using .csv file format
-- only. When this value is set to @true@ or @y@ using Amazon S3 as a
-- target, if the data has quotation marks or newline characters in it, DMS
-- encloses the entire column with an additional pair of double quotation
-- marks (\"). Every quotation mark within the data is repeated twice.
--
-- The default value is @true@. Valid values include @true@, @false@, @y@,
-- and @n@.
s3Settings_rfc4180 :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Bool)
s3Settings_rfc4180 = Lens.lens (\S3Settings' {rfc4180} -> rfc4180) (\s@S3Settings' {} a -> s {rfc4180 = a} :: S3Settings)

-- | The number of rows in a row group. A smaller row group size provides
-- faster reads. But as the number of row groups grows, writes become slower.
-- This parameter defaults to 10,000 rows. This number is used for
-- .parquet file format only.
--
-- If you choose a value larger than the maximum, @RowGroupLength@ is set
-- to the max row group length in bytes (64 * 1024 * 1024).
s3Settings_rowGroupLength :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Int)
s3Settings_rowGroupLength = Lens.lens (\S3Settings' {rowGroupLength} -> rowGroupLength) (\s@S3Settings' {} a -> s {rowGroupLength = a} :: S3Settings)

-- | If you are using @SSE_KMS@ for the @EncryptionMode@, provide the KMS key
-- ID. The key that you use needs an attached policy that enables Identity
-- and Access Management (IAM) user permissions and allows use of the key.
--
-- Here is a CLI example:
-- @aws dms create-endpoint --endpoint-identifier @/@value@/@ --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=@/@value@/@,BucketFolder=@/@value@/@,BucketName=@/@value@/@,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=@/@value@/@ @
s3Settings_serverSideEncryptionKmsKeyId :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Text)
s3Settings_serverSideEncryptionKmsKeyId = Lens.lens (\S3Settings' {serverSideEncryptionKmsKeyId} -> serverSideEncryptionKmsKeyId) (\s@S3Settings' {} a -> s {serverSideEncryptionKmsKeyId = a} :: S3Settings)
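
-- A sketch (not generated code) of the endpoint settings from the CLI
-- example above. The ARN, bucket, and key identifier are placeholders;
-- @EncryptionMode@ would additionally be set to its SSE_KMS value, which
-- lives in 'EncryptionModeValue'.
exampleKmsEncryptedTarget :: S3Settings
exampleKmsEncryptedTarget =
  newS3Settings
    { serviceAccessRoleArn =
        Prelude.Just "arn:aws:iam::123456789012:role/dms-s3-access",
      bucketName = Prelude.Just "my-example-bucket",
      bucketFolder = Prelude.Just "encrypted-output",
      serverSideEncryptionKmsKeyId =
        Prelude.Just "arn:aws:kms:us-east-1:123456789012:key/EXAMPLE-KEY-ID"
    }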

-- | The Amazon Resource Name (ARN) used by the service to access the IAM
-- role. The role must allow the @iam:PassRole@ action. It is a required
-- parameter that enables DMS to write and read objects from an S3 bucket.
s3Settings_serviceAccessRoleArn :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Text)
s3Settings_serviceAccessRoleArn = Lens.lens (\S3Settings' {serviceAccessRoleArn} -> serviceAccessRoleArn) (\s@S3Settings' {} a -> s {serviceAccessRoleArn = a} :: S3Settings)

-- | A value that, when nonblank, causes DMS to add a column with timestamp
-- information to the endpoint data for an Amazon S3 target.
--
-- DMS supports the @TimestampColumnName@ parameter in versions 3.1.4 and
-- later.
--
-- DMS includes an additional @STRING@ column in the .csv or .parquet
-- object files of your migrated data when you set @TimestampColumnName@ to
-- a nonblank value.
--
-- For a full load, each row of this timestamp column contains a timestamp
-- for when the data was transferred from the source to the target by DMS.
--
-- For a change data capture (CDC) load, each row of the timestamp column
-- contains the timestamp for the commit of that row in the source
-- database.
--
-- The string format for this timestamp column value is
-- @yyyy-MM-dd HH:mm:ss.SSSSSS@. By default, the precision of this value is
-- in microseconds. For a CDC load, the rounding of the precision depends
-- on the commit timestamp supported by DMS for the source database.
--
-- When the @AddColumnName@ parameter is set to @true@, DMS also includes a
-- name for the timestamp column that you set with @TimestampColumnName@.
s3Settings_timestampColumnName :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Text)
s3Settings_timestampColumnName = Lens.lens (\S3Settings' {timestampColumnName} -> timestampColumnName) (\s@S3Settings' {} a -> s {timestampColumnName = a} :: S3Settings)
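
-- A sketch (not generated code): naming the timestamp column adds a STRING
-- column whose values use the @yyyy-MM-dd HH:mm:ss.SSSSSS@ format described
-- above. The column name here is hypothetical.
exampleTimestampColumn :: S3Settings
exampleTimestampColumn =
  newS3Settings
    { timestampColumnName = Prelude.Just "dms_load_timestamp",
      addColumnName = Prelude.Just Prelude.True
    }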

-- | This setting applies if the S3 output files during a change data capture
-- (CDC) load are written in .csv format. If set to @true@ for columns not
-- included in the supplemental log, DMS uses the value specified by
-- <https://docs.aws.amazon.com/dms/latest/APIReference/API_S3Settings.html#DMS-Type-S3Settings-CsvNoSupValue CsvNoSupValue>
-- . If not set or set to @false@, DMS uses the null value for these
-- columns.
--
-- This setting is supported in DMS versions 3.4.1 and later.
s3Settings_useCsvNoSupValue :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Bool)
s3Settings_useCsvNoSupValue = Lens.lens (\S3Settings' {useCsvNoSupValue} -> useCsvNoSupValue) (\s@S3Settings' {} a -> s {useCsvNoSupValue = a} :: S3Settings)

-- | When set to true, this parameter uses the task start time as the
-- timestamp column value instead of the time data is written to the target.
-- For full load, when @useTaskStartTimeForFullLoadTimestamp@ is set to
-- @true@, each row of the timestamp column contains the task start time.
-- For CDC loads, each row of the timestamp column contains the transaction
-- commit time.
--
-- When @useTaskStartTimeForFullLoadTimestamp@ is set to @false@, the full
-- load timestamp in the timestamp column increments with the time data
-- arrives at the target.
s3Settings_useTaskStartTimeForFullLoadTimestamp :: Lens.Lens' S3Settings (Prelude.Maybe Prelude.Bool)
s3Settings_useTaskStartTimeForFullLoadTimestamp = Lens.lens (\S3Settings' {useTaskStartTimeForFullLoadTimestamp} -> useTaskStartTimeForFullLoadTimestamp) (\s@S3Settings' {} a -> s {useTaskStartTimeForFullLoadTimestamp = a} :: S3Settings)

instance Data.FromJSON S3Settings where
  parseJSON =
    Data.withObject
      "S3Settings"
      ( \x ->
          S3Settings'
            Prelude.<$> (x Data..:? "AddColumnName")
            Prelude.<*> (x Data..:? "AddTrailingPaddingCharacter")
            Prelude.<*> (x Data..:? "BucketFolder")
            Prelude.<*> (x Data..:? "BucketName")
            Prelude.<*> (x Data..:? "CannedAclForObjects")
            Prelude.<*> (x Data..:? "CdcInsertsAndUpdates")
            Prelude.<*> (x Data..:? "CdcInsertsOnly")
            Prelude.<*> (x Data..:? "CdcMaxBatchInterval")
            Prelude.<*> (x Data..:? "CdcMinFileSize")
            Prelude.<*> (x Data..:? "CdcPath")
            Prelude.<*> (x Data..:? "CompressionType")
            Prelude.<*> (x Data..:? "CsvDelimiter")
            Prelude.<*> (x Data..:? "CsvNoSupValue")
            Prelude.<*> (x Data..:? "CsvNullValue")
            Prelude.<*> (x Data..:? "CsvRowDelimiter")
            Prelude.<*> (x Data..:? "DataFormat")
            Prelude.<*> (x Data..:? "DataPageSize")
            Prelude.<*> (x Data..:? "DatePartitionDelimiter")
            Prelude.<*> (x Data..:? "DatePartitionEnabled")
            Prelude.<*> (x Data..:? "DatePartitionSequence")
            Prelude.<*> (x Data..:? "DatePartitionTimezone")
            Prelude.<*> (x Data..:? "DictPageSizeLimit")
            Prelude.<*> (x Data..:? "EnableStatistics")
            Prelude.<*> (x Data..:? "EncodingType")
            Prelude.<*> (x Data..:? "EncryptionMode")
            Prelude.<*> (x Data..:? "ExpectedBucketOwner")
            Prelude.<*> (x Data..:? "ExternalTableDefinition")
            Prelude.<*> (x Data..:? "IgnoreHeaderRows")
            Prelude.<*> (x Data..:? "IncludeOpForFullLoad")
            Prelude.<*> (x Data..:? "MaxFileSize")
            Prelude.<*> (x Data..:? "ParquetTimestampInMillisecond")
            Prelude.<*> (x Data..:? "ParquetVersion")
            Prelude.<*> (x Data..:? "PreserveTransactions")
            Prelude.<*> (x Data..:? "Rfc4180")
            Prelude.<*> (x Data..:? "RowGroupLength")
            Prelude.<*> (x Data..:? "ServerSideEncryptionKmsKeyId")
            Prelude.<*> (x Data..:? "ServiceAccessRoleArn")
            Prelude.<*> (x Data..:? "TimestampColumnName")
            Prelude.<*> (x Data..:? "UseCsvNoSupValue")
            Prelude.<*> (x Data..:? "UseTaskStartTimeForFullLoadTimestamp")
      )
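
-- A hedged decoding sketch (illustration only, not generated code): the
-- instance above is an Aeson 'Data.FromJSON' instance, so the usual Aeson
-- decoders accept the DMS API field names. Assuming @Data.Aeson@ from the
-- aeson package is imported qualified as @Aeson@:
--
-- > Aeson.eitherDecode
-- >   "{\"BucketName\":\"my-example-bucket\",\"Rfc4180\":true}"
-- >   :: Prelude.Either Prelude.String S3Settings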

instance Prelude.Hashable S3Settings where
  hashWithSalt :: Int -> S3Settings -> Int
hashWithSalt Int
_salt S3Settings' {Maybe Bool
Maybe Int
Maybe Text
Maybe CannedAclForObjectsValue
Maybe CompressionTypeValue
Maybe DataFormatValue
Maybe DatePartitionDelimiterValue
Maybe DatePartitionSequenceValue
Maybe EncodingTypeValue
Maybe EncryptionModeValue
Maybe ParquetVersionValue
useTaskStartTimeForFullLoadTimestamp :: Maybe Bool
useCsvNoSupValue :: Maybe Bool
timestampColumnName :: Maybe Text
serviceAccessRoleArn :: Maybe Text
serverSideEncryptionKmsKeyId :: Maybe Text
rowGroupLength :: Maybe Int
rfc4180 :: Maybe Bool
preserveTransactions :: Maybe Bool
parquetVersion :: Maybe ParquetVersionValue
parquetTimestampInMillisecond :: Maybe Bool
maxFileSize :: Maybe Int
includeOpForFullLoad :: Maybe Bool
ignoreHeaderRows :: Maybe Int
externalTableDefinition :: Maybe Text
expectedBucketOwner :: Maybe Text
encryptionMode :: Maybe EncryptionModeValue
encodingType :: Maybe EncodingTypeValue
enableStatistics :: Maybe Bool
dictPageSizeLimit :: Maybe Int
datePartitionTimezone :: Maybe Text
datePartitionSequence :: Maybe DatePartitionSequenceValue
datePartitionEnabled :: Maybe Bool
datePartitionDelimiter :: Maybe DatePartitionDelimiterValue
dataPageSize :: Maybe Int
dataFormat :: Maybe DataFormatValue
csvRowDelimiter :: Maybe Text
csvNullValue :: Maybe Text
csvNoSupValue :: Maybe Text
csvDelimiter :: Maybe Text
compressionType :: Maybe CompressionTypeValue
cdcPath :: Maybe Text
cdcMinFileSize :: Maybe Int
cdcMaxBatchInterval :: Maybe Int
cdcInsertsOnly :: Maybe Bool
cdcInsertsAndUpdates :: Maybe Bool
cannedAclForObjects :: Maybe CannedAclForObjectsValue
bucketName :: Maybe Text
bucketFolder :: Maybe Text
addTrailingPaddingCharacter :: Maybe Bool
addColumnName :: Maybe Bool
$sel:useTaskStartTimeForFullLoadTimestamp:S3Settings' :: S3Settings -> Maybe Bool
$sel:useCsvNoSupValue:S3Settings' :: S3Settings -> Maybe Bool
..} =
    _salt
      `Prelude.hashWithSalt` addColumnName
      `Prelude.hashWithSalt` addTrailingPaddingCharacter
      `Prelude.hashWithSalt` bucketFolder
      `Prelude.hashWithSalt` bucketName
      `Prelude.hashWithSalt` cannedAclForObjects
      `Prelude.hashWithSalt` cdcInsertsAndUpdates
      `Prelude.hashWithSalt` cdcInsertsOnly
      `Prelude.hashWithSalt` cdcMaxBatchInterval
      `Prelude.hashWithSalt` cdcMinFileSize
      `Prelude.hashWithSalt` cdcPath
      `Prelude.hashWithSalt` compressionType
      `Prelude.hashWithSalt` csvDelimiter
      `Prelude.hashWithSalt` csvNoSupValue
      `Prelude.hashWithSalt` csvNullValue
      `Prelude.hashWithSalt` csvRowDelimiter
      `Prelude.hashWithSalt` dataFormat
      `Prelude.hashWithSalt` dataPageSize
      `Prelude.hashWithSalt` datePartitionDelimiter
      `Prelude.hashWithSalt` datePartitionEnabled
      `Prelude.hashWithSalt` datePartitionSequence
      `Prelude.hashWithSalt` datePartitionTimezone
      `Prelude.hashWithSalt` dictPageSizeLimit
      `Prelude.hashWithSalt` enableStatistics
      `Prelude.hashWithSalt` encodingType
      `Prelude.hashWithSalt` encryptionMode
      `Prelude.hashWithSalt` expectedBucketOwner
      `Prelude.hashWithSalt` externalTableDefinition
      `Prelude.hashWithSalt` ignoreHeaderRows
      `Prelude.hashWithSalt` includeOpForFullLoad
      `Prelude.hashWithSalt` maxFileSize
      `Prelude.hashWithSalt` parquetTimestampInMillisecond
      `Prelude.hashWithSalt` parquetVersion
      `Prelude.hashWithSalt` preserveTransactions
      `Prelude.hashWithSalt` rfc4180
      `Prelude.hashWithSalt` rowGroupLength
      `Prelude.hashWithSalt` serverSideEncryptionKmsKeyId
      `Prelude.hashWithSalt` serviceAccessRoleArn
      `Prelude.hashWithSalt` timestampColumnName
      `Prelude.hashWithSalt` useCsvNoSupValue
      `Prelude.hashWithSalt` useTaskStartTimeForFullLoadTimestamp
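
-- A minimal usage sketch (illustrative only, not part of the generated
-- module): the 'Prelude.Hashable' instance above folds every field into the
-- salt, so two settings values that differ in any field hash differently.
-- This assumes the zero-argument smart constructor 'newS3Settings' defined
-- earlier in this module; plain record-update syntax is used here for
-- brevity, though client code may prefer the generated field lenses.
--
-- @
-- let base = newS3Settings
--     a    = base { bucketName = Prelude.Just "replication-bucket" }
-- in Prelude.hashWithSalt 0 a Prelude./= Prelude.hashWithSalt 0 base
-- @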

instance Prelude.NFData S3Settings where
  rnf S3Settings' {..} =
    Prelude.rnf addColumnName
      `Prelude.seq` Prelude.rnf addTrailingPaddingCharacter
      `Prelude.seq` Prelude.rnf bucketFolder
      `Prelude.seq` Prelude.rnf bucketName
      `Prelude.seq` Prelude.rnf cannedAclForObjects
      `Prelude.seq` Prelude.rnf cdcInsertsAndUpdates
      `Prelude.seq` Prelude.rnf cdcInsertsOnly
      `Prelude.seq` Prelude.rnf cdcMaxBatchInterval
      `Prelude.seq` Prelude.rnf cdcMinFileSize
      `Prelude.seq` Prelude.rnf cdcPath
      `Prelude.seq` Prelude.rnf compressionType
      `Prelude.seq` Prelude.rnf csvDelimiter
      `Prelude.seq` Prelude.rnf csvNoSupValue
      `Prelude.seq` Prelude.rnf csvNullValue
      `Prelude.seq` Prelude.rnf csvRowDelimiter
      `Prelude.seq` Prelude.rnf dataFormat
      `Prelude.seq` Prelude.rnf dataPageSize
      `Prelude.seq` Prelude.rnf datePartitionDelimiter
      `Prelude.seq` Prelude.rnf datePartitionEnabled
      `Prelude.seq` Prelude.rnf datePartitionSequence
      `Prelude.seq` Prelude.rnf datePartitionTimezone
      `Prelude.seq` Prelude.rnf dictPageSizeLimit
      `Prelude.seq` Prelude.rnf enableStatistics
      `Prelude.seq` Prelude.rnf encodingType
      `Prelude.seq` Prelude.rnf encryptionMode
      `Prelude.seq` Prelude.rnf expectedBucketOwner
      `Prelude.seq` Prelude.rnf externalTableDefinition
      `Prelude.seq` Prelude.rnf ignoreHeaderRows
      `Prelude.seq` Prelude.rnf includeOpForFullLoad
      `Prelude.seq` Prelude.rnf maxFileSize
      `Prelude.seq` Prelude.rnf parquetTimestampInMillisecond
      `Prelude.seq` Prelude.rnf parquetVersion
      `Prelude.seq` Prelude.rnf preserveTransactions
      `Prelude.seq` Prelude.rnf rfc4180
      `Prelude.seq` Prelude.rnf rowGroupLength
      `Prelude.seq` Prelude.rnf serverSideEncryptionKmsKeyId
      `Prelude.seq` Prelude.rnf serviceAccessRoleArn
      `Prelude.seq` Prelude.rnf timestampColumnName
      `Prelude.seq` Prelude.rnf useCsvNoSupValue
      `Prelude.seq` Prelude.rnf useTaskStartTimeForFullLoadTimestamp
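
-- A minimal sketch of how the 'Prelude.NFData' instance above is typically
-- used (illustrative only): forcing a settings value before caching it or
-- handing it to another thread evaluates every 'Prelude.Maybe' field, so no
-- thunks are retained. This assumes the deepseq package's
-- 'Control.DeepSeq.force' and base's 'Control.Exception.evaluate'.
--
-- @
-- import Control.DeepSeq (force)
-- import Control.Exception (evaluate)
--
-- forceSettings :: S3Settings -> IO S3Settings
-- forceSettings = evaluate . force
-- @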

instance Data.ToJSON S3Settings where
  toJSON S3Settings' {..} =
    Data.object
      ( Prelude.catMaybes
          [ ("AddColumnName" Data..=) Prelude.<$> addColumnName,
            ("AddTrailingPaddingCharacter" Data..=)
              Prelude.<$> addTrailingPaddingCharacter,
            ("BucketFolder" Data..=) Prelude.<$> bucketFolder,
            ("BucketName" Data..=) Prelude.<$> bucketName,
            ("CannedAclForObjects" Data..=)
              Prelude.<$> cannedAclForObjects,
            ("CdcInsertsAndUpdates" Data..=)
              Prelude.<$> cdcInsertsAndUpdates,
            ("CdcInsertsOnly" Data..=) Prelude.<$> cdcInsertsOnly,
            ("CdcMaxBatchInterval" Data..=)
              Prelude.<$> cdcMaxBatchInterval,
            ("CdcMinFileSize" Data..=) Prelude.<$> cdcMinFileSize,
            ("CdcPath" Data..=) Prelude.<$> cdcPath,
            ("CompressionType" Data..=)
              Prelude.<$> compressionType,
            ("CsvDelimiter" Data..=) Prelude.<$> csvDelimiter,
            ("CsvNoSupValue" Data..=) Prelude.<$> csvNoSupValue,
            ("CsvNullValue" Data..=) Prelude.<$> csvNullValue,
            ("CsvRowDelimiter" Data..=)
              Prelude.<$> csvRowDelimiter,
            ("DataFormat" Data..=) Prelude.<$> dataFormat,
            ("DataPageSize" Data..=) Prelude.<$> dataPageSize,
            ("DatePartitionDelimiter" Data..=)
              Prelude.<$> datePartitionDelimiter,
            ("DatePartitionEnabled" Data..=)
              Prelude.<$> datePartitionEnabled,
            ("DatePartitionSequence" Data..=)
              Prelude.<$> datePartitionSequence,
            ("DatePartitionTimezone" Data..=)
              Prelude.<$> datePartitionTimezone,
            ("DictPageSizeLimit" Data..=)
              Prelude.<$> dictPageSizeLimit,
            ("EnableStatistics" Data..=)
              Prelude.<$> enableStatistics,
            ("EncodingType" Data..=) Prelude.<$> encodingType,
            ("EncryptionMode" Data..=)
              Prelude.<$> encryptionMode,
            ("ExpectedBucketOwner" Data..=)
              Prelude.<$> expectedBucketOwner,
            ("ExternalTableDefinition" Data..=)
              Prelude.<$> externalTableDefinition,
            ("IgnoreHeaderRows" Data..=)
              Prelude.<$> ignoreHeaderRows,
            ("IncludeOpForFullLoad" Data..=)
              Prelude.<$> includeOpForFullLoad,
            ("MaxFileSize" Data..=) Prelude.<$> maxFileSize,
            ("ParquetTimestampInMillisecond" Data..=)
              Prelude.<$> parquetTimestampInMillisecond,
            ("ParquetVersion" Data..=)
              Prelude.<$> parquetVersion,
            ("PreserveTransactions" Data..=)
              Prelude.<$> preserveTransactions,
            ("Rfc4180" Data..=) Prelude.<$> rfc4180,
            ("RowGroupLength" Data..=)
              Prelude.<$> rowGroupLength,
            ("ServerSideEncryptionKmsKeyId" Data..=)
              Prelude.<$> serverSideEncryptionKmsKeyId,
            ("ServiceAccessRoleArn" Data..=)
              Prelude.<$> serviceAccessRoleArn,
            ("TimestampColumnName" Data..=)
              Prelude.<$> timestampColumnName,
            ("UseCsvNoSupValue" Data..=)
              Prelude.<$> useCsvNoSupValue,
            ("UseTaskStartTimeForFullLoadTimestamp" Data..=)
              Prelude.<$> useTaskStartTimeForFullLoadTimestamp
          ]
      )
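
-- A minimal sketch of the resulting wire format (illustrative only): because
-- the instance above filters the pair list with 'Prelude.catMaybes', fields
-- left as 'Prelude.Nothing' are omitted from the serialized object entirely
-- rather than emitted as @null@. Assuming the zero-argument smart
-- constructor 'newS3Settings',
--
-- @
-- Data.toJSON (newS3Settings { bucketName = Prelude.Just "replication-bucket" })
-- @
--
-- yields a JSON object containing only the @\"BucketName\"@ key.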