{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE DuplicateRecordFields #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE StrictData #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
{-# OPTIONS_GHC -fno-warn-unused-matches #-}

-- Derived from AWS service descriptions, licensed under Apache 2.0.

-- |
-- Module      : Amazonka.Firehose.Types.ParquetSerDe
-- Copyright   : (c) 2013-2023 Brendan Hay
-- License     : Mozilla Public License, v. 2.0.
-- Maintainer  : Brendan Hay
-- Stability   : auto-generated
-- Portability : non-portable (GHC extensions)
module Amazonka.Firehose.Types.ParquetSerDe where

import qualified Amazonka.Core as Core
import qualified Amazonka.Core.Lens.Internal as Lens
import qualified Amazonka.Data as Data
import Amazonka.Firehose.Types.ParquetCompression
import Amazonka.Firehose.Types.ParquetWriterVersion
import qualified Amazonka.Prelude as Prelude

-- | A serializer to use for converting data to the Parquet format before
-- storing it in Amazon S3. For more information, see
-- <https://parquet.apache.org/documentation/latest/ Apache Parquet>.
--
-- /See:/ 'newParquetSerDe' smart constructor.
data ParquetSerDe = ParquetSerDe'
  { -- | The Hadoop Distributed File System (HDFS) block size. This is useful if
    -- you intend to copy the data from Amazon S3 to HDFS before querying. The
    -- default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses
    -- this value for padding calculations.
    blockSizeBytes :: Prelude.Maybe Prelude.Natural,
    -- | The compression code to use over data blocks. The possible values are
    -- @UNCOMPRESSED@, @SNAPPY@, and @GZIP@, with the default being @SNAPPY@.
    -- Use @SNAPPY@ for higher decompression speed. Use @GZIP@ if the
    -- compression ratio is more important than speed.
    compression :: Prelude.Maybe ParquetCompression,
    -- | Indicates whether to enable dictionary compression.
    enableDictionaryCompression :: Prelude.Maybe Prelude.Bool,
    -- | The maximum amount of padding to apply. This is useful if you intend to
    -- copy the data from Amazon S3 to HDFS before querying. The default is 0.
    maxPaddingBytes :: Prelude.Maybe Prelude.Natural,
    -- | The Parquet page size. Column chunks are divided into pages. A page is
    -- conceptually an indivisible unit (in terms of compression and encoding).
    -- The minimum value is 64 KiB and the default is 1 MiB.
    pageSizeBytes :: Prelude.Maybe Prelude.Natural,
    -- | Indicates the version of row format to output. The possible values are
    -- @V1@ and @V2@. The default is @V1@.
    writerVersion :: Prelude.Maybe ParquetWriterVersion
  }
  deriving (Prelude.Eq, Prelude.Read, Prelude.Show, Prelude.Generic)

-- |
-- Create a value of 'ParquetSerDe' with all optional fields omitted.
--
-- Use <https://hackage.haskell.org/package/generic-lens generic-lens> or <https://hackage.haskell.org/package/optics optics> to modify other optional fields.
--
-- The following record fields are available, with the corresponding lenses provided
-- for backwards compatibility:
--
-- 'blockSizeBytes', 'parquetSerDe_blockSizeBytes' - The Hadoop Distributed File System (HDFS) block size. This is useful if
-- you intend to copy the data from Amazon S3 to HDFS before querying. The
-- default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses
-- this value for padding calculations.
--
-- 'compression', 'parquetSerDe_compression' - The compression code to use over data blocks. The possible values are
-- @UNCOMPRESSED@, @SNAPPY@, and @GZIP@, with the default being @SNAPPY@.
-- Use @SNAPPY@ for higher decompression speed. Use @GZIP@ if the
-- compression ratio is more important than speed.
--
-- 'enableDictionaryCompression', 'parquetSerDe_enableDictionaryCompression' - Indicates whether to enable dictionary compression.
--
-- 'maxPaddingBytes', 'parquetSerDe_maxPaddingBytes' - The maximum amount of padding to apply. This is useful if you intend to
-- copy the data from Amazon S3 to HDFS before querying. The default is 0.
--
-- 'pageSizeBytes', 'parquetSerDe_pageSizeBytes' - The Parquet page size. Column chunks are divided into pages. A page is
-- conceptually an indivisible unit (in terms of compression and encoding).
-- The minimum value is 64 KiB and the default is 1 MiB.
--
-- 'writerVersion', 'parquetSerDe_writerVersion' - Indicates the version of row format to output. The possible values are
-- @V1@ and @V2@. The default is @V1@.
newParquetSerDe ::
  ParquetSerDe
newParquetSerDe =
  ParquetSerDe'
    { blockSizeBytes = Prelude.Nothing,
      compression = Prelude.Nothing,
      enableDictionaryCompression = Prelude.Nothing,
      maxPaddingBytes = Prelude.Nothing,
      pageSizeBytes = Prelude.Nothing,
      writerVersion = Prelude.Nothing
    }
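
-- A minimal usage sketch, not part of the generated API: start from
-- 'newParquetSerDe' and override optional fields with the lenses defined
-- below. It assumes the @(&)@ and @(?~)@ operators from a lens library
-- (e.g. @lens@ or @microlens@) and the generated pattern synonym
-- 'ParquetCompression_GZIP' are in scope:
--
-- @
-- exampleSerDe :: ParquetSerDe
-- exampleSerDe =
--   newParquetSerDe
--     & parquetSerDe_compression ?~ ParquetCompression_GZIP
--     & parquetSerDe_enableDictionaryCompression ?~ True
-- @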

-- | The Hadoop Distributed File System (HDFS) block size. This is useful if
-- you intend to copy the data from Amazon S3 to HDFS before querying. The
-- default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses
-- this value for padding calculations.
parquetSerDe_blockSizeBytes :: Lens.Lens' ParquetSerDe (Prelude.Maybe Prelude.Natural)
parquetSerDe_blockSizeBytes = Lens.lens (\ParquetSerDe' {blockSizeBytes} -> blockSizeBytes) (\s@ParquetSerDe' {} a -> s {blockSizeBytes = a} :: ParquetSerDe)

-- | The compression code to use over data blocks. The possible values are
-- @UNCOMPRESSED@, @SNAPPY@, and @GZIP@, with the default being @SNAPPY@.
-- Use @SNAPPY@ for higher decompression speed. Use @GZIP@ if the
-- compression ratio is more important than speed.
parquetSerDe_compression :: Lens.Lens' ParquetSerDe (Prelude.Maybe ParquetCompression)
parquetSerDe_compression = Lens.lens (\ParquetSerDe' {compression} -> compression) (\s@ParquetSerDe' {} a -> s {compression = a} :: ParquetSerDe)

-- | Indicates whether to enable dictionary compression.
parquetSerDe_enableDictionaryCompression :: Lens.Lens' ParquetSerDe (Prelude.Maybe Prelude.Bool)
parquetSerDe_enableDictionaryCompression = Lens.lens (\ParquetSerDe' {enableDictionaryCompression} -> enableDictionaryCompression) (\s@ParquetSerDe' {} a -> s {enableDictionaryCompression = a} :: ParquetSerDe)

-- | The maximum amount of padding to apply. This is useful if you intend to
-- copy the data from Amazon S3 to HDFS before querying. The default is 0.
parquetSerDe_maxPaddingBytes :: Lens.Lens' ParquetSerDe (Prelude.Maybe Prelude.Natural)
parquetSerDe_maxPaddingBytes = Lens.lens (\ParquetSerDe' {maxPaddingBytes} -> maxPaddingBytes) (\s@ParquetSerDe' {} a -> s {maxPaddingBytes = a} :: ParquetSerDe)

-- | The Parquet page size. Column chunks are divided into pages. A page is
-- conceptually an indivisible unit (in terms of compression and encoding).
-- The minimum value is 64 KiB and the default is 1 MiB.
parquetSerDe_pageSizeBytes :: Lens.Lens' ParquetSerDe (Prelude.Maybe Prelude.Natural)
parquetSerDe_pageSizeBytes = Lens.lens (\ParquetSerDe' {pageSizeBytes} -> pageSizeBytes) (\s@ParquetSerDe' {} a -> s {pageSizeBytes = a} :: ParquetSerDe)

-- | Indicates the version of row format to output. The possible values are
-- @V1@ and @V2@. The default is @V1@.
parquetSerDe_writerVersion :: Lens.Lens' ParquetSerDe (Prelude.Maybe ParquetWriterVersion)
parquetSerDe_writerVersion = Lens.lens (\ParquetSerDe' {writerVersion} -> writerVersion) (\s@ParquetSerDe' {} a -> s {writerVersion = a} :: ParquetSerDe)

instance Data.FromJSON ParquetSerDe where
  parseJSON =
    Data.withObject
      "ParquetSerDe"
      ( \x ->
          ParquetSerDe'
            Prelude.<$> (x Data..:? "BlockSizeBytes")
            Prelude.<*> (x Data..:? "Compression")
            Prelude.<*> (x Data..:? "EnableDictionaryCompression")
            Prelude.<*> (x Data..:? "MaxPaddingBytes")
            Prelude.<*> (x Data..:? "PageSizeBytes")
            Prelude.<*> (x Data..:? "WriterVersion")
      )
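
-- A decoding sketch, assuming @aeson@'s 'eitherDecode' is called directly
-- (amazonka drives this parser internally). Keys absent from the object
-- simply parse to 'Prelude.Nothing' thanks to 'Data..:?':
--
-- @
-- eitherDecode "{\"Compression\": \"SNAPPY\", \"PageSizeBytes\": 1048576}"
--   :: Either String ParquetSerDe
-- @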

instance Prelude.Hashable ParquetSerDe where
  hashWithSalt _salt ParquetSerDe' {..} =
    _salt
      `Prelude.hashWithSalt` blockSizeBytes
      `Prelude.hashWithSalt` compression
      `Prelude.hashWithSalt` enableDictionaryCompression
      `Prelude.hashWithSalt` maxPaddingBytes
      `Prelude.hashWithSalt` pageSizeBytes
      `Prelude.hashWithSalt` writerVersion

instance Prelude.NFData ParquetSerDe where
  rnf ParquetSerDe' {..} =
    Prelude.rnf blockSizeBytes
      `Prelude.seq` Prelude.rnf compression
      `Prelude.seq` Prelude.rnf enableDictionaryCompression
      `Prelude.seq` Prelude.rnf maxPaddingBytes
      `Prelude.seq` Prelude.rnf pageSizeBytes
      `Prelude.seq` Prelude.rnf writerVersion

instance Data.ToJSON ParquetSerDe where
  toJSON ParquetSerDe' {..} =
    Data.object
      ( Prelude.catMaybes
          [ ("BlockSizeBytes" Data..=) Prelude.<$> blockSizeBytes,
            ("Compression" Data..=) Prelude.<$> compression,
            ("EnableDictionaryCompression" Data..=)
              Prelude.<$> enableDictionaryCompression,
            ("MaxPaddingBytes" Data..=) Prelude.<$> maxPaddingBytes,
            ("PageSizeBytes" Data..=) Prelude.<$> pageSizeBytes,
            ("WriterVersion" Data..=) Prelude.<$> writerVersion
          ]
      )
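
-- For reference, a fully populated value serializes to an object shaped like
-- the following (values here are the documented defaults); because
-- 'Prelude.catMaybes' drops 'Prelude.Nothing' fields, a plain
-- 'newParquetSerDe' encodes as @{}@:
--
-- @
-- { "BlockSizeBytes": 268435456
-- , "Compression": "SNAPPY"
-- , "EnableDictionaryCompression": true
-- , "MaxPaddingBytes": 0
-- , "PageSizeBytes": 1048576
-- , "WriterVersion": "V1"
-- }
-- @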