amazonka-elastictranscoder-1.6.0: Amazon Elastic Transcoder SDK.

Copyright   : (c) 2013-2018 Brendan Hay
License     : Mozilla Public License, v. 2.0.
Maintainer  : Brendan Hay <brendan.g.hay+amazonka@gmail.com>
Stability   : auto-generated
Portability : non-portable (GHC extensions)
Safe Haskell: None
Language    : Haskell2010

Network.AWS.ElasticTranscoder.Types


Service Configuration

elasticTranscoder :: Service Source #

API version 2012-09-25 of the Amazon Elastic Transcoder SDK configuration.
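
The Service value above carries the signing, endpoint, retry, and API-version settings that every request in this library uses. Below is a minimal, hedged sketch of issuing a request against that configuration, assuming the Network.AWS interface from amazonka 1.6 (newEnv, runAWS, within, send) and the listPipelines request from Network.AWS.ElasticTranscoder; the region and credential discovery are illustrative only.

import Network.AWS
import Network.AWS.ElasticTranscoder

main :: IO ()
main = do
  env <- newEnv Discover             -- credentials from the environment or instance profile
  rs  <- runResourceT . runAWS env . within NorthVirginia $
           send listPipelines        -- signed and sent using the elasticTranscoder Service value
  print rs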

Errors

_IncompatibleVersionException :: AsError a => Getting (First ServiceError) a ServiceError Source #

Prism for IncompatibleVersionException errors.

_ValidationException :: AsError a => Getting (First ServiceError) a ServiceError Source #

One or more required parameter values were not provided in the request.

_AccessDeniedException :: AsError a => Getting (First ServiceError) a ServiceError Source #

General authentication failure. The request was not signed correctly.

_InternalServiceException :: AsError a => Getting (First ServiceError) a ServiceError Source #

Elastic Transcoder encountered an unexpected exception while trying to fulfill the request.

_ResourceNotFoundException :: AsError a => Getting (First ServiceError) a ServiceError Source #

The requested resource does not exist or is not available. For example, the pipeline to which you're trying to add a job doesn't exist or is still being created.

_LimitExceededException :: AsError a => Getting (First ServiceError) a ServiceError Source #

Too many operations for a given AWS account. For example, the number of pipelines exceeds the maximum allowed.

_ResourceInUseException :: AsError a => Getting (First ServiceError) a ServiceError Source #

The resource you are attempting to change is in use. For example, you are attempting to delete a pipeline that is currently in use.
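
These prisms are intended to be matched against errors raised while running requests. Below is a hedged sketch using trying from Control.Exception.Lens (part of the lens package); the readPipeline request and the pipeline id are illustrative only, and if your amazonka version re-exports trying from Network.AWS, drop the extra import.

{-# LANGUAGE OverloadedStrings #-}

import Control.Exception.Lens (trying)
import Network.AWS
import Network.AWS.ElasticTranscoder

main :: IO ()
main = do
  env <- newEnv Discover
  -- trying returns Left ServiceError when _ResourceNotFoundException matches the thrown error.
  res <- runResourceT . runAWS env . within NorthVirginia .
           trying _ResourceNotFoundException $
             send (readPipeline "1111111111111-abcde1")   -- illustrative pipeline id
  case res of
    Left  _  -> putStrLn "No such pipeline"
    Right rs -> print rs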

Artwork

data Artwork Source #

The file to be used as album art. There can be multiple artworks associated with an audio file, to a maximum of 20.

To remove artwork or leave the artwork empty, you can either set Artwork to null, or set the Merge Policy to Replace and use an empty Artwork array.

To pass through existing artwork unchanged, set the Merge Policy to Prepend, Append, or Fallback, and use an empty Artwork array.

See: artwork smart constructor.

Instances

Eq Artwork Source # 

Methods

(==) :: Artwork -> Artwork -> Bool #

(/=) :: Artwork -> Artwork -> Bool #

Data Artwork Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> Artwork -> c Artwork #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c Artwork #

toConstr :: Artwork -> Constr #

dataTypeOf :: Artwork -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c Artwork) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c Artwork) #

gmapT :: (forall b. Data b => b -> b) -> Artwork -> Artwork #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> Artwork -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> Artwork -> r #

gmapQ :: (forall d. Data d => d -> u) -> Artwork -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> Artwork -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> Artwork -> m Artwork #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> Artwork -> m Artwork #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> Artwork -> m Artwork #

Read Artwork Source # 
Show Artwork Source # 
Generic Artwork Source # 

Associated Types

type Rep Artwork :: * -> * #

Methods

from :: Artwork -> Rep Artwork x #

to :: Rep Artwork x -> Artwork #

Hashable Artwork Source # 

Methods

hashWithSalt :: Int -> Artwork -> Int #

hash :: Artwork -> Int #

ToJSON Artwork Source # 
FromJSON Artwork Source # 
NFData Artwork Source # 

Methods

rnf :: Artwork -> () #

type Rep Artwork Source # 
type Rep Artwork = D1 * (MetaData "Artwork" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "Artwork'" PrefixI True) ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_aSizingPolicy") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) ((:*:) * (S1 * (MetaSel (Just Symbol "_aAlbumArtFormat") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_aMaxHeight") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))))) ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_aInputKey") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_aPaddingPolicy") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text)))) ((:*:) * (S1 * (MetaSel (Just Symbol "_aEncryption") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Encryption))) (S1 * (MetaSel (Just Symbol "_aMaxWidth") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text)))))))

artwork :: Artwork Source #

Creates a value of Artwork with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • aSizingPolicy - Specify one of the following values to control scaling of the output album art: * Fit: Elastic Transcoder scales the output art so it matches the value that you specified in either MaxWidth or MaxHeight without exceeding the other value. * Fill: Elastic Transcoder scales the output art so it matches the value that you specified in either MaxWidth or MaxHeight and matches or exceeds the other value. Elastic Transcoder centers the output art and then crops it in the dimension (if any) that exceeds the maximum value. * Stretch: Elastic Transcoder stretches the output art to match the values that you specified for MaxWidth and MaxHeight . If the relative proportions of the input art and the output art are different, the output art will be distorted. * Keep: Elastic Transcoder does not scale the output art. If either dimension of the input art exceeds the values that you specified for MaxWidth and MaxHeight , Elastic Transcoder crops the output art. * ShrinkToFit: Elastic Transcoder scales the output art down so that its dimensions match the values that you specified for at least one of MaxWidth and MaxHeight without exceeding either value. If you specify this option, Elastic Transcoder does not scale the art up. * ShrinkToFill Elastic Transcoder scales the output art down so that its dimensions match the values that you specified for at least one of MaxWidth and MaxHeight without dropping below either value. If you specify this option, Elastic Transcoder does not scale the art up.
  • aAlbumArtFormat - The format of album art, if any. Valid formats are .jpg and .png .
  • aMaxHeight - The maximum height of the output album art in pixels. If you specify auto , Elastic Transcoder uses 600 as the default value. If you specify a numeric value, enter an even integer between 32 and 3072, inclusive.
  • aInputKey - The name of the file to be used as album art. To determine which Amazon S3 bucket contains the specified file, Elastic Transcoder checks the pipeline specified by PipelineId ; the InputBucket object in that pipeline identifies the bucket. If the file name includes a prefix, for example, cooking/pie.jpg , include the prefix in the key. If the file isn't in the specified bucket, Elastic Transcoder returns an error.
  • aPaddingPolicy - When you set PaddingPolicy to Pad , Elastic Transcoder may add white bars to the top and bottom and/or left and right sides of the output album art to make the total size of the output art match the values that you specified for MaxWidth and MaxHeight .
  • aEncryption - The encryption settings, if any, that you want Elastic Transcoder to apply to your artwork.
  • aMaxWidth - The maximum width of the output album art in pixels. If you specify auto , Elastic Transcoder uses 600 as the default value. If you specify a numeric value, enter an even integer between 32 and 4096, inclusive.
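
A hedged usage sketch: build an Artwork with the artwork smart constructor and override fields with the lenses above, where (&) and (?~) come from Control.Lens; the key and dimensions are illustrative only.

{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (?~))
import Network.AWS.ElasticTranscoder.Types

albumArt :: Artwork
albumArt = artwork
  & aInputKey      ?~ "cooking/pie.jpg"   -- key in the pipeline's InputBucket
  & aMaxWidth      ?~ "600"
  & aMaxHeight     ?~ "600"
  & aSizingPolicy  ?~ "ShrinkToFit"
  & aPaddingPolicy ?~ "Pad"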

aSizingPolicy :: Lens' Artwork (Maybe Text) Source #

Specify one of the following values to control scaling of the output album art: * Fit: Elastic Transcoder scales the output art so it matches the value that you specified in either MaxWidth or MaxHeight without exceeding the other value. * Fill: Elastic Transcoder scales the output art so it matches the value that you specified in either MaxWidth or MaxHeight and matches or exceeds the other value. Elastic Transcoder centers the output art and then crops it in the dimension (if any) that exceeds the maximum value. * Stretch: Elastic Transcoder stretches the output art to match the values that you specified for MaxWidth and MaxHeight . If the relative proportions of the input art and the output art are different, the output art will be distorted. * Keep: Elastic Transcoder does not scale the output art. If either dimension of the input art exceeds the values that you specified for MaxWidth and MaxHeight , Elastic Transcoder crops the output art. * ShrinkToFit: Elastic Transcoder scales the output art down so that its dimensions match the values that you specified for at least one of MaxWidth and MaxHeight without exceeding either value. If you specify this option, Elastic Transcoder does not scale the art up. * ShrinkToFill Elastic Transcoder scales the output art down so that its dimensions match the values that you specified for at least one of MaxWidth and MaxHeight without dropping below either value. If you specify this option, Elastic Transcoder does not scale the art up.

aAlbumArtFormat :: Lens' Artwork (Maybe Text) Source #

The format of album art, if any. Valid formats are .jpg and .png .

aMaxHeight :: Lens' Artwork (Maybe Text) Source #

The maximum height of the output album art in pixels. If you specify auto , Elastic Transcoder uses 600 as the default value. If you specify a numeric value, enter an even integer between 32 and 3072, inclusive.

aInputKey :: Lens' Artwork (Maybe Text) Source #

The name of the file to be used as album art. To determine which Amazon S3 bucket contains the specified file, Elastic Transcoder checks the pipeline specified by PipelineId ; the InputBucket object in that pipeline identifies the bucket. If the file name includes a prefix, for example, cooking/pie.jpg , include the prefix in the key. If the file isn't in the specified bucket, Elastic Transcoder returns an error.

aPaddingPolicy :: Lens' Artwork (Maybe Text) Source #

When you set PaddingPolicy to Pad , Elastic Transcoder may add white bars to the top and bottom and/or left and right sides of the output album art to make the total size of the output art match the values that you specified for MaxWidth and MaxHeight .

aEncryption :: Lens' Artwork (Maybe Encryption) Source #

The encryption settings, if any, that you want Elastic Transcoder to apply to your artwork.

aMaxWidth :: Lens' Artwork (Maybe Text) Source #

The maximum width of the output album art in pixels. If you specify auto , Elastic Transcoder uses 600 as the default value. If you specify a numeric value, enter an even integer between 32 and 4096, inclusive.

AudioCodecOptions

data AudioCodecOptions Source #

Options associated with your audio codec.

See: audioCodecOptions smart constructor.

Instances

Eq AudioCodecOptions Source # 
Data AudioCodecOptions Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> AudioCodecOptions -> c AudioCodecOptions #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c AudioCodecOptions #

toConstr :: AudioCodecOptions -> Constr #

dataTypeOf :: AudioCodecOptions -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c AudioCodecOptions) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c AudioCodecOptions) #

gmapT :: (forall b. Data b => b -> b) -> AudioCodecOptions -> AudioCodecOptions #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> AudioCodecOptions -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> AudioCodecOptions -> r #

gmapQ :: (forall d. Data d => d -> u) -> AudioCodecOptions -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> AudioCodecOptions -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> AudioCodecOptions -> m AudioCodecOptions #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> AudioCodecOptions -> m AudioCodecOptions #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> AudioCodecOptions -> m AudioCodecOptions #

Read AudioCodecOptions Source # 
Show AudioCodecOptions Source # 
Generic AudioCodecOptions Source # 
Hashable AudioCodecOptions Source # 
ToJSON AudioCodecOptions Source # 
FromJSON AudioCodecOptions Source # 
NFData AudioCodecOptions Source # 

Methods

rnf :: AudioCodecOptions -> () #

type Rep AudioCodecOptions Source # 
type Rep AudioCodecOptions = D1 * (MetaData "AudioCodecOptions" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "AudioCodecOptions'" PrefixI True) ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_acoSigned") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_acoBitDepth") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text)))) ((:*:) * (S1 * (MetaSel (Just Symbol "_acoProfile") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_acoBitOrder") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))))))

audioCodecOptions :: AudioCodecOptions Source #

Creates a value of AudioCodecOptions with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • acoSigned - You can only choose whether an audio sample is signed when you specify pcm for the value of Audio:Codec. Whether audio samples are represented with negative and positive numbers (signed) or only positive numbers (unsigned). The supported value is Signed .
  • acoBitDepth - You can only choose an audio bit depth when you specify flac or pcm for the value of Audio:Codec. The bit depth of a sample is how many bits of information are included in the audio samples. The higher the bit depth, the better the audio, but the larger the file. Valid values are 16 and 24 . The most common bit depth is 24 .
  • acoProfile - You can only choose an audio profile when you specify AAC for the value of Audio:Codec. Specify the AAC profile for the output file. Elastic Transcoder supports the following profiles: * auto : If you specify auto , Elastic Transcoder selects the profile based on the bit rate selected for the output file. * AAC-LC : The most common AAC profile. Use for bit rates larger than 64 kbps. * HE-AAC : Not supported on some older players and devices. Use for bit rates between 40 and 80 kbps. * HE-AACv2 : Not supported on some players and devices. Use for bit rates less than 48 kbps. All outputs in a Smooth playlist must have the same value for Profile .
  • acoBitOrder - You can only choose an audio bit order when you specify pcm for the value of Audio:Codec. The order the bits of a PCM sample are stored in. The supported value is LittleEndian .
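
A hedged sketch of codec options for signed 16-bit little-endian PCM, built with the lenses above; the values follow the descriptions in this list and are illustrative only.

{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (?~))
import Network.AWS.ElasticTranscoder.Types

pcmOptions :: AudioCodecOptions
pcmOptions = audioCodecOptions
  & acoBitDepth ?~ "16"            -- valid only with flac or pcm codecs
  & acoSigned   ?~ "Signed"        -- pcm only
  & acoBitOrder ?~ "LittleEndian"  -- pcm only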

acoSigned :: Lens' AudioCodecOptions (Maybe Text) Source #

You can only choose whether an audio sample is signed when you specify pcm for the value of Audio:Codec. Whether audio samples are represented with negative and positive numbers (signed) or only positive numbers (unsigned). The supported value is Signed .

acoBitDepth :: Lens' AudioCodecOptions (Maybe Text) Source #

You can only choose an audio bit depth when you specify flac or pcm for the value of Audio:Codec. The bit depth of a sample is how many bits of information are included in the audio samples. The higher the bit depth, the better the audio, but the larger the file. Valid values are 16 and 24 . The most common bit depth is 24 .

acoProfile :: Lens' AudioCodecOptions (Maybe Text) Source #

You can only choose an audio profile when you specify AAC for the value of Audio:Codec. Specify the AAC profile for the output file. Elastic Transcoder supports the following profiles: * auto : If you specify auto , Elastic Transcoder selects the profile based on the bit rate selected for the output file. * AAC-LC : The most common AAC profile. Use for bit rates larger than 64 kbps. * HE-AAC : Not supported on some older players and devices. Use for bit rates between 40 and 80 kbps. * HE-AACv2 : Not supported on some players and devices. Use for bit rates less than 48 kbps. All outputs in a Smooth playlist must have the same value for Profile .

acoBitOrder :: Lens' AudioCodecOptions (Maybe Text) Source #

You can only choose an audio bit order when you specify pcm for the value of Audio:Codec. The order the bits of a PCM sample are stored in. The supported value is LittleEndian .

AudioParameters

data AudioParameters Source #

Parameters required for transcoding audio.

See: audioParameters smart constructor.

Instances

Eq AudioParameters Source # 
Data AudioParameters Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> AudioParameters -> c AudioParameters #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c AudioParameters #

toConstr :: AudioParameters -> Constr #

dataTypeOf :: AudioParameters -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c AudioParameters) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c AudioParameters) #

gmapT :: (forall b. Data b => b -> b) -> AudioParameters -> AudioParameters #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> AudioParameters -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> AudioParameters -> r #

gmapQ :: (forall d. Data d => d -> u) -> AudioParameters -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> AudioParameters -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> AudioParameters -> m AudioParameters #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> AudioParameters -> m AudioParameters #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> AudioParameters -> m AudioParameters #

Read AudioParameters Source # 
Show AudioParameters Source # 
Generic AudioParameters Source # 
Hashable AudioParameters Source # 
ToJSON AudioParameters Source # 
FromJSON AudioParameters Source # 
NFData AudioParameters Source # 

Methods

rnf :: AudioParameters -> () #

type Rep AudioParameters Source # 
type Rep AudioParameters = D1 * (MetaData "AudioParameters" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "AudioParameters'" PrefixI True) ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_apChannels") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) ((:*:) * (S1 * (MetaSel (Just Symbol "_apCodec") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_apAudioPackingMode") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))))) ((:*:) * (S1 * (MetaSel (Just Symbol "_apSampleRate") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) ((:*:) * (S1 * (MetaSel (Just Symbol "_apBitRate") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_apCodecOptions") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe AudioCodecOptions)))))))

audioParameters :: AudioParameters Source #

Creates a value of AudioParameters with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • apChannels - The number of audio channels in the output file. The following values are valid: auto , 0 , 1 , 2 One channel carries the information played by a single speaker. For example, a stereo track with two channels sends one channel to the left speaker, and the other channel to the right speaker. The output channels are organized into tracks. If you want Elastic Transcoder to automatically detect the number of audio channels in the input file and use that value for the output file, select auto . The output of a specific channel value and inputs are as follows: * auto channel specified, with any input: Pass through up to eight input channels. * 0 channels specified, with any input: Audio omitted from the output. * 1 channel specified, with at least one input channel: Mono sound. * 2 channels specified, with any input: Two identical mono channels or stereo. For more information about tracks, see Audio:AudioPackingMode. For more information about how Elastic Transcoder organizes channels and tracks, see Audio:AudioPackingMode .
  • apCodec - The audio codec for the output file. Valid values include aac , flac , mp2 , mp3 , pcm , and vorbis .
  • apAudioPackingMode - The method of organizing audio channels and tracks. Use Audio:Channels to specify the number of channels in your output, and Audio:AudioPackingMode to specify the number of tracks and their relation to the channels. If you do not specify an Audio:AudioPackingMode , Elastic Transcoder uses SingleTrack . The following values are valid: SingleTrack , OneChannelPerTrack , and OneChannelPerTrackWithMosTo8Tracks When you specify SingleTrack , Elastic Transcoder creates a single track for your output. The track can have up to eight channels. Use SingleTrack for all non-mxf containers. The outputs of SingleTrack for a specific channel value and inputs are as follows: * 0 channels with any input: Audio omitted from the output * 1, 2, or auto channels with no audio input: Audio omitted from the output * 1 channel with any input with audio: One track with one channel, downmixed if necessary * 2 channels with one track with one channel: One track with two identical channels * 2 or auto channels with two tracks with one channel each: One track with two channels * 2 or auto channels with one track with two channels: One track with two channels * 2 channels with one track with multiple channels: One track with two channels * auto channels with one track with one channel: One track with one channel * auto channels with one track with multiple channels: One track with multiple channels When you specify OneChannelPerTrack , Elastic Transcoder creates a new track for every channel in your output. Your output can have up to eight single-channel tracks. The outputs of OneChannelPerTrack for a specific channel value and inputs are as follows: * 0 channels with any input: Audio omitted from the output * 1, 2, or auto channels with no audio input: Audio omitted from the output * 1 channel with any input with audio: One track with one channel, downmixed if necessary * 2 channels with one track with one channel: Two tracks with one identical channel each * 2 or auto channels with two tracks with one channel each: Two tracks with one channel each * 2 or auto channels with one track with two channels: Two tracks with one channel each * 2 channels with one track with multiple channels: Two tracks with one channel each * auto channels with one track with one channel: One track with one channel * auto channels with one track with multiple channels: Up to eight tracks with one channel each When you specify OneChannelPerTrackWithMosTo8Tracks , Elastic Transcoder creates eight single-channel tracks for your output. All tracks that do not contain audio data from an input channel are MOS, or Mit Out Sound, tracks. 
The outputs of OneChannelPerTrackWithMosTo8Tracks for a specific channel value and inputs are as follows: * 0 channels with any input: Audio omitted from the output * 1, 2, or auto channels with no audio input: Audio omitted from the output * 1 channel with any input with audio: One track with one channel, downmixed if necessary, plus six MOS tracks * 2 channels with one track with one channel: Two tracks with one identical channel each, plus six MOS tracks * 2 or auto channels with two tracks with one channel each: Two tracks with one channel each, plus six MOS tracks * 2 or auto channels with one track with two channels: Two tracks with one channel each, plus six MOS tracks * 2 channels with one track with multiple channels: Two tracks with one channel each, plus six MOS tracks * auto channels with one track with one channel: One track with one channel, plus seven MOS tracks * auto channels with one track with multiple channels: Up to eight tracks with one channel each, plus MOS tracks until there are eight tracks in all
  • apSampleRate - The sample rate of the audio stream in the output file, in Hertz. Valid values include: auto , 22050 , 32000 , 44100 , 48000 , 96000 If you specify auto , Elastic Transcoder automatically detects the sample rate.
  • apBitRate - The bit rate of the audio stream in the output file, in kilobits/second. Enter an integer between 64 and 320, inclusive.
  • apCodecOptions - If you specified AAC for Audio:Codec , this is the AAC compression profile to use. Valid values include: auto , AAC-LC , HE-AAC , HE-AACv2 If you specify auto , Elastic Transcoder chooses a profile based on the bit rate of the output file.
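
A hedged sketch of stereo AAC audio parameters that nests an AudioCodecOptions profile; the codec, channel, rate, and bit-rate values are illustrative only.

{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (?~))
import Network.AWS.ElasticTranscoder.Types

aacAudio :: AudioParameters
aacAudio = audioParameters
  & apCodec        ?~ "AAC"
  & apChannels     ?~ "2"
  & apSampleRate   ?~ "44100"
  & apBitRate      ?~ "160"
  & apCodecOptions ?~ (audioCodecOptions & acoProfile ?~ "AAC-LC")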

apChannels :: Lens' AudioParameters (Maybe Text) Source #

The number of audio channels in the output file. The following values are valid: auto , 0 , 1 , 2 One channel carries the information played by a single speaker. For example, a stereo track with two channels sends one channel to the left speaker, and the other channel to the right speaker. The output channels are organized into tracks. If you want Elastic Transcoder to automatically detect the number of audio channels in the input file and use that value for the output file, select auto . The output of a specific channel value and inputs are as follows: * auto channel specified, with any input: Pass through up to eight input channels. * 0 channels specified, with any input: Audio omitted from the output. * 1 channel specified, with at least one input channel: Mono sound. * 2 channels specified, with any input: Two identical mono channels or stereo. For more information about tracks, see Audio:AudioPackingMode. For more information about how Elastic Transcoder organizes channels and tracks, see Audio:AudioPackingMode .

apCodec :: Lens' AudioParameters (Maybe Text) Source #

The audio codec for the output file. Valid values include aac , flac , mp2 , mp3 , pcm , and vorbis .

apAudioPackingMode :: Lens' AudioParameters (Maybe Text) Source #

The method of organizing audio channels and tracks. Use Audio:Channels to specify the number of channels in your output, and Audio:AudioPackingMode to specify the number of tracks and their relation to the channels. If you do not specify an Audio:AudioPackingMode , Elastic Transcoder uses SingleTrack . The following values are valid: SingleTrack , OneChannelPerTrack , and OneChannelPerTrackWithMosTo8Tracks When you specify SingleTrack , Elastic Transcoder creates a single track for your output. The track can have up to eight channels. Use SingleTrack for all non-mxf containers. The outputs of SingleTrack for a specific channel value and inputs are as follows: * 0 channels with any input: Audio omitted from the output * 1, 2, or auto channels with no audio input: Audio omitted from the output * 1 channel with any input with audio: One track with one channel, downmixed if necessary * 2 channels with one track with one channel: One track with two identical channels * 2 or auto channels with two tracks with one channel each: One track with two channels * 2 or auto channels with one track with two channels: One track with two channels * 2 channels with one track with multiple channels: One track with two channels * auto channels with one track with one channel: One track with one channel * auto channels with one track with multiple channels: One track with multiple channels When you specify OneChannelPerTrack , Elastic Transcoder creates a new track for every channel in your output. Your output can have up to eight single-channel tracks. The outputs of OneChannelPerTrack for a specific channel value and inputs are as follows: * 0 channels with any input: Audio omitted from the output * 1, 2, or auto channels with no audio input: Audio omitted from the output * 1 channel with any input with audio: One track with one channel, downmixed if necessary * 2 channels with one track with one channel: Two tracks with one identical channel each * 2 or auto channels with two tracks with one channel each: Two tracks with one channel each * 2 or auto channels with one track with two channels: Two tracks with one channel each * 2 channels with one track with multiple channels: Two tracks with one channel each * auto channels with one track with one channel: One track with one channel * auto channels with one track with multiple channels: Up to eight tracks with one channel each When you specify OneChannelPerTrackWithMosTo8Tracks , Elastic Transcoder creates eight single-channel tracks for your output. All tracks that do not contain audio data from an input channel are MOS, or Mit Out Sound, tracks. 
The outputs of OneChannelPerTrackWithMosTo8Tracks for a specific channel value and inputs are as follows: * 0 channels with any input: Audio omitted from the output * 1, 2, or auto channels with no audio input: Audio omitted from the output * 1 channel with any input with audio: One track with one channel, downmixed if necessary, plus six MOS tracks * 2 channels with one track with one channel: Two tracks with one identical channel each, plus six MOS tracks * 2 or auto channels with two tracks with one channel each: Two tracks with one channel each, plus six MOS tracks * 2 or auto channels with one track with two channels: Two tracks with one channel each, plus six MOS tracks * 2 channels with one track with multiple channels: Two tracks with one channel each, plus six MOS tracks * auto channels with one track with one channel: One track with one channel, plus seven MOS tracks * auto channels with one track with multiple channels: Up to eight tracks with one channel each, plus MOS tracks until there are eight tracks in all

apSampleRate :: Lens' AudioParameters (Maybe Text) Source #

The sample rate of the audio stream in the output file, in Hertz. Valid values include: auto , 22050 , 32000 , 44100 , 48000 , 96000 If you specify auto , Elastic Transcoder automatically detects the sample rate.

apBitRate :: Lens' AudioParameters (Maybe Text) Source #

The bit rate of the audio stream in the output file, in kilobits/second. Enter an integer between 64 and 320, inclusive.

apCodecOptions :: Lens' AudioParameters (Maybe AudioCodecOptions) Source #

If you specified AAC for Audio:Codec , this is the AAC compression profile to use. Valid values include: auto , AAC-LC , HE-AAC , HE-AACv2 If you specify auto , Elastic Transcoder chooses a profile based on the bit rate of the output file.

CaptionFormat

data CaptionFormat Source #

The file format of the output captions. If you leave this value blank, Elastic Transcoder returns an error.

See: captionFormat smart constructor.

Instances

Eq CaptionFormat Source # 
Data CaptionFormat Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> CaptionFormat -> c CaptionFormat #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c CaptionFormat #

toConstr :: CaptionFormat -> Constr #

dataTypeOf :: CaptionFormat -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c CaptionFormat) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c CaptionFormat) #

gmapT :: (forall b. Data b => b -> b) -> CaptionFormat -> CaptionFormat #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> CaptionFormat -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> CaptionFormat -> r #

gmapQ :: (forall d. Data d => d -> u) -> CaptionFormat -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> CaptionFormat -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> CaptionFormat -> m CaptionFormat #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> CaptionFormat -> m CaptionFormat #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> CaptionFormat -> m CaptionFormat #

Read CaptionFormat Source # 
Show CaptionFormat Source # 
Generic CaptionFormat Source # 

Associated Types

type Rep CaptionFormat :: * -> * #

Hashable CaptionFormat Source # 
ToJSON CaptionFormat Source # 
FromJSON CaptionFormat Source # 
NFData CaptionFormat Source # 

Methods

rnf :: CaptionFormat -> () #

type Rep CaptionFormat Source # 
type Rep CaptionFormat = D1 * (MetaData "CaptionFormat" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "CaptionFormat'" PrefixI True) ((:*:) * (S1 * (MetaSel (Just Symbol "_cfPattern") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) ((:*:) * (S1 * (MetaSel (Just Symbol "_cfFormat") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_cfEncryption") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Encryption))))))

captionFormat :: CaptionFormat Source #

Creates a value of CaptionFormat with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • cfPattern - The prefix for caption filenames, in the form description -{language} , where: * description is a description of the video. * {language} is a literal value that Elastic Transcoder replaces with the two- or three-letter code for the language of the caption in the output file names. If you don't include {language} in the file name pattern, Elastic Transcoder automatically appends "{language} " to the value that you specify for the description. In addition, Elastic Transcoder automatically appends the count to the end of the segment files. For example, suppose you're transcoding into srt format. When you enter "Sydney-{language}-sunrise", and the language of the captions is English (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.
  • cfFormat - The format you specify determines whether Elastic Transcoder generates an embedded or sidecar caption for this output. * Valid Embedded Caption Formats: * For FLAC : None * For MP3 : None * For MP4 : mov-text * For MPEG-TS : None * For ogg : None * For webm : None * Valid Sidecar Caption Formats: Elastic Transcoder supports dfxp (first div element only), scc, srt, and webvtt. If you want ttml or smpte-tt compatible captions, specify dfxp as your output format. * For FMP4 : dfxp * Non-FMP4 outputs : All sidecar types. fmp4 captions have an extension of .ismt
  • cfEncryption - The encryption settings, if any, that you want Elastic Transcoder to apply to your caption formats.
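
A hedged sketch of a sidecar srt caption format, reusing the file-name pattern from the cfPattern description; the values are illustrative only.

{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (?~))
import Network.AWS.ElasticTranscoder.Types

srtFormat :: CaptionFormat
srtFormat = captionFormat
  & cfFormat  ?~ "srt"
  & cfPattern ?~ "Sydney-{language}-sunrise"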

cfPattern :: Lens' CaptionFormat (Maybe Text) Source #

The prefix for caption filenames, in the form description -{language} , where: * description is a description of the video. * {language} is a literal value that Elastic Transcoder replaces with the two- or three-letter code for the language of the caption in the output file names. If you don't include {language} in the file name pattern, Elastic Transcoder automatically appends "{language} " to the value that you specify for the description. In addition, Elastic Transcoder automatically appends the count to the end of the segment files. For example, suppose you're transcoding into srt format. When you enter "Sydney-{language}-sunrise", and the language of the captions is English (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.

cfFormat :: Lens' CaptionFormat (Maybe Text) Source #

The format you specify determines whether Elastic Transcoder generates an embedded or sidecar caption for this output. * Valid Embedded Caption Formats: * For FLAC : None * For MP3 : None * For MP4 : mov-text * For MPEG-TS : None * For ogg : None * For webm : None * Valid Sidecar Caption Formats: Elastic Transcoder supports dfxp (first div element only), scc, srt, and webvtt. If you want ttml or smpte-tt compatible captions, specify dfxp as your output format. * For FMP4 : dfxp * Non-FMP4 outputs : All sidecar types. fmp4 captions have an extension of .ismt

cfEncryption :: Lens' CaptionFormat (Maybe Encryption) Source #

The encryption settings, if any, that you want Elastic Transcoder to apply to your caption formats.

CaptionSource

data CaptionSource Source #

A source file for the input sidecar captions used during the transcoding process.

See: captionSource smart constructor.

Instances

Eq CaptionSource Source # 
Data CaptionSource Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> CaptionSource -> c CaptionSource #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c CaptionSource #

toConstr :: CaptionSource -> Constr #

dataTypeOf :: CaptionSource -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c CaptionSource) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c CaptionSource) #

gmapT :: (forall b. Data b => b -> b) -> CaptionSource -> CaptionSource #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> CaptionSource -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> CaptionSource -> r #

gmapQ :: (forall d. Data d => d -> u) -> CaptionSource -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> CaptionSource -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> CaptionSource -> m CaptionSource #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> CaptionSource -> m CaptionSource #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> CaptionSource -> m CaptionSource #

Read CaptionSource Source # 
Show CaptionSource Source # 
Generic CaptionSource Source # 

Associated Types

type Rep CaptionSource :: * -> * #

Hashable CaptionSource Source # 
ToJSON CaptionSource Source # 
FromJSON CaptionSource Source # 
NFData CaptionSource Source # 

Methods

rnf :: CaptionSource -> () #

type Rep CaptionSource Source # 
type Rep CaptionSource = D1 * (MetaData "CaptionSource" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "CaptionSource'" PrefixI True) ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_csTimeOffset") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_csEncryption") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Encryption)))) ((:*:) * (S1 * (MetaSel (Just Symbol "_csKey") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) ((:*:) * (S1 * (MetaSel (Just Symbol "_csLanguage") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_csLabel") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text)))))))

captionSource :: CaptionSource Source #

Creates a value of CaptionSource with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • csTimeOffset - For clip generation or captions that do not start at the same time as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode before including captions. Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.
  • csEncryption - The encryption settings, if any, that Elastic Transcoder needs to decrypt your caption sources, or that you want Elastic Transcoder to apply to your caption sources.
  • csKey - The name of the sidecar caption file that you want Elastic Transcoder to include in the output file.
  • csLanguage - A string that specifies the language of the caption. If you specified multiple inputs with captions, the caption language must match in order to be included in the output. Specify this as one of: * 2-character ISO 639-1 code * 3-character ISO 639-2 code For more information on ISO language codes and language names, see the List of ISO 639-1 codes.
  • csLabel - The label of the caption shown in the player when choosing a language. We recommend that you put the caption language name here, in the language of the captions.
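
A hedged sketch of an English sidecar caption source built with the lenses above; the key is an illustrative object in the pipeline's input bucket.

{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (?~))
import Network.AWS.ElasticTranscoder.Types

englishCaptions :: CaptionSource
englishCaptions = captionSource
  & csKey      ?~ "captions/sydney-en.srt"
  & csLanguage ?~ "en"        -- ISO 639-1 code
  & csLabel    ?~ "English"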

csTimeOffset :: Lens' CaptionSource (Maybe Text) Source #

For clip generation or captions that do not start at the same time as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode before including captions. Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.

csEncryption :: Lens' CaptionSource (Maybe Encryption) Source #

The encryption settings, if any, that Elastic Transcoder needs to decrypt your caption sources, or that you want Elastic Transcoder to apply to your caption sources.

csKey :: Lens' CaptionSource (Maybe Text) Source #

The name of the sidecar caption file that you want Elastic Transcoder to include in the output file.

csLanguage :: Lens' CaptionSource (Maybe Text) Source #

A string that specifies the language of the caption. If you specified multiple inputs with captions, the caption language must match in order to be included in the output. Specify this as one of: * 2-character ISO 639-1 code * 3-character ISO 639-2 code For more information on ISO language codes and language names, see the List of ISO 639-1 codes.

csLabel :: Lens' CaptionSource (Maybe Text) Source #

The label of the caption shown in the player when choosing a language. We recommend that you put the caption language name here, in the language of the captions.

Captions

data Captions Source #

The captions to be created, if any.

See: captions smart constructor.

Instances

Eq Captions Source # 
Data Captions Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> Captions -> c Captions #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c Captions #

toConstr :: Captions -> Constr #

dataTypeOf :: Captions -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c Captions) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c Captions) #

gmapT :: (forall b. Data b => b -> b) -> Captions -> Captions #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> Captions -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> Captions -> r #

gmapQ :: (forall d. Data d => d -> u) -> Captions -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> Captions -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> Captions -> m Captions #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> Captions -> m Captions #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> Captions -> m Captions #

Read Captions Source # 
Show Captions Source # 
Generic Captions Source # 

Associated Types

type Rep Captions :: * -> * #

Methods

from :: Captions -> Rep Captions x #

to :: Rep Captions x -> Captions #

Hashable Captions Source # 

Methods

hashWithSalt :: Int -> Captions -> Int #

hash :: Captions -> Int #

ToJSON Captions Source # 
FromJSON Captions Source # 
NFData Captions Source # 

Methods

rnf :: Captions -> () #

type Rep Captions Source # 
type Rep Captions = D1 * (MetaData "Captions" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "Captions'" PrefixI True) ((:*:) * (S1 * (MetaSel (Just Symbol "_cMergePolicy") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) ((:*:) * (S1 * (MetaSel (Just Symbol "_cCaptionSources") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe [CaptionSource]))) (S1 * (MetaSel (Just Symbol "_cCaptionFormats") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe [CaptionFormat]))))))

captions :: Captions Source #

Creates a value of Captions with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • cMergePolicy - A policy that determines how Elastic Transcoder handles the existence of multiple captions. * MergeOverride: Elastic Transcoder transcodes both embedded and sidecar captions into outputs. If captions for a language are embedded in the input file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the embedded captions for that language. * MergeRetain: Elastic Transcoder transcodes both embedded and sidecar captions into outputs. If captions for a language are embedded in the input file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the sidecar captions for that language. If CaptionSources is empty, Elastic Transcoder omits all sidecar captions from the output files. * Override: Elastic Transcoder transcodes only the sidecar captions that you specify in CaptionSources . MergePolicy cannot be null.
  • cCaptionSources - Source files for the input sidecar captions used during the transcoding process. To omit all sidecar captions, leave CaptionSources blank.
  • cCaptionFormats - The array of file formats for the output captions. If you leave this value blank, Elastic Transcoder returns an error.
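
A hedged sketch that combines the pieces above: a sidecar source in, srt captions out, retaining any embedded captions. The list-valued lenses take plain lists via (.~); all keys and values are illustrative only.

{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (.~), (?~))
import Network.AWS.ElasticTranscoder.Types

outputCaptions :: Captions
outputCaptions = captions
  & cMergePolicy    ?~ "MergeRetain"
  & cCaptionSources .~ [ captionSource & csKey      ?~ "captions/sydney-en.srt"
                                       & csLanguage ?~ "en" ]
  & cCaptionFormats .~ [ captionFormat & cfFormat   ?~ "srt" ]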

cMergePolicy :: Lens' Captions (Maybe Text) Source #

A policy that determines how Elastic Transcoder handles the existence of multiple captions. * MergeOverride: Elastic Transcoder transcodes both embedded and sidecar captions into outputs. If captions for a language are embedded in the input file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the embedded captions for that language. * MergeRetain: Elastic Transcoder transcodes both embedded and sidecar captions into outputs. If captions for a language are embedded in the input file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the sidecar captions for that language. If CaptionSources is empty, Elastic Transcoder omits all sidecar captions from the output files. * Override: Elastic Transcoder transcodes only the sidecar captions that you specify in CaptionSources . MergePolicy cannot be null.

cCaptionSources :: Lens' Captions [CaptionSource] Source #

Source files for the input sidecar captions used during the transcoding process. To omit all sidecar captions, leave CaptionSources blank.

cCaptionFormats :: Lens' Captions [CaptionFormat] Source #

The array of file formats for the output captions. If you leave this value blank, Elastic Transcoder returns an error.

Clip

data Clip Source #

Settings for one clip in a composition. All jobs in a playlist must have the same clip settings.

See: clip smart constructor.

Instances

Eq Clip Source # 

Methods

(==) :: Clip -> Clip -> Bool #

(/=) :: Clip -> Clip -> Bool #

Data Clip Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> Clip -> c Clip #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c Clip #

toConstr :: Clip -> Constr #

dataTypeOf :: Clip -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c Clip) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c Clip) #

gmapT :: (forall b. Data b => b -> b) -> Clip -> Clip #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> Clip -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> Clip -> r #

gmapQ :: (forall d. Data d => d -> u) -> Clip -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> Clip -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> Clip -> m Clip #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> Clip -> m Clip #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> Clip -> m Clip #

Read Clip Source # 
Show Clip Source # 

Methods

showsPrec :: Int -> Clip -> ShowS #

show :: Clip -> String #

showList :: [Clip] -> ShowS #

Generic Clip Source # 

Associated Types

type Rep Clip :: * -> * #

Methods

from :: Clip -> Rep Clip x #

to :: Rep Clip x -> Clip #

Hashable Clip Source # 

Methods

hashWithSalt :: Int -> Clip -> Int #

hash :: Clip -> Int #

ToJSON Clip Source # 
FromJSON Clip Source # 
NFData Clip Source # 

Methods

rnf :: Clip -> () #

type Rep Clip Source # 
type Rep Clip = D1 * (MetaData "Clip" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" True) (C1 * (MetaCons "Clip'" PrefixI True) (S1 * (MetaSel (Just Symbol "_cTimeSpan") NoSourceUnpackedness NoSourceStrictness DecidedLazy) (Rec0 * (Maybe TimeSpan))))

clip :: Clip Source #

Creates a value of Clip with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • cTimeSpan - Settings that determine when a clip begins and how long it lasts.
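
A hedged sketch of a Clip; it assumes the timeSpan smart constructor and the tsStartTime/tsDuration lenses defined elsewhere in this module, and the offsets are illustrative only.

{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (?~))
import Network.AWS.ElasticTranscoder.Types

openingClip :: Clip
openingClip = clip
  & cTimeSpan ?~ (timeSpan & tsStartTime ?~ "00:00:10.000"   -- assumed TimeSpan lenses
                           & tsDuration  ?~ "00:01:30.000")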

cTimeSpan :: Lens' Clip (Maybe TimeSpan) Source #

Settings that determine when a clip begins and how long it lasts.

CreateJobOutput

data CreateJobOutput Source #

The CreateJobOutput structure.

See: createJobOutput smart constructor.

Instances

Eq CreateJobOutput Source # 
Data CreateJobOutput Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> CreateJobOutput -> c CreateJobOutput #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c CreateJobOutput #

toConstr :: CreateJobOutput -> Constr #

dataTypeOf :: CreateJobOutput -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c CreateJobOutput) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c CreateJobOutput) #

gmapT :: (forall b. Data b => b -> b) -> CreateJobOutput -> CreateJobOutput #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> CreateJobOutput -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> CreateJobOutput -> r #

gmapQ :: (forall d. Data d => d -> u) -> CreateJobOutput -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> CreateJobOutput -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> CreateJobOutput -> m CreateJobOutput #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> CreateJobOutput -> m CreateJobOutput #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> CreateJobOutput -> m CreateJobOutput #

Read CreateJobOutput Source # 
Show CreateJobOutput Source # 
Generic CreateJobOutput Source # 
Hashable CreateJobOutput Source # 
ToJSON CreateJobOutput Source # 
NFData CreateJobOutput Source # 

Methods

rnf :: CreateJobOutput -> () #

type Rep CreateJobOutput Source # 
type Rep CreateJobOutput = D1 * (MetaData "CreateJobOutput" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "CreateJobOutput'" PrefixI True) ((:*:) * ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_cjoThumbnailPattern") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_cjoCaptions") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Captions)))) ((:*:) * (S1 * (MetaSel (Just Symbol "_cjoPresetId") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) ((:*:) * (S1 * (MetaSel (Just Symbol "_cjoComposition") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe [Clip]))) (S1 * (MetaSel (Just Symbol "_cjoAlbumArt") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe JobAlbumArt)))))) ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_cjoWatermarks") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe [JobWatermark]))) ((:*:) * (S1 * (MetaSel (Just Symbol "_cjoEncryption") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Encryption))) (S1 * (MetaSel (Just Symbol "_cjoKey") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))))) ((:*:) * (S1 * (MetaSel (Just Symbol "_cjoSegmentDuration") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) ((:*:) * (S1 * (MetaSel (Just Symbol "_cjoThumbnailEncryption") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Encryption))) (S1 * (MetaSel (Just Symbol "_cjoRotate") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))))))))

createJobOutput :: CreateJobOutput Source #

Creates a value of CreateJobOutput with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • cjoThumbnailPattern - Whether you want Elastic Transcoder to create thumbnails for your videos and, if so, how you want Elastic Transcoder to name the files. If you don't want Elastic Transcoder to create thumbnails, specify "". If you do want Elastic Transcoder to create thumbnails, specify the information that you want to include in the file name for each thumbnail. You can specify the following values in any sequence: * {count} (Required) : If you want to create thumbnails, you must include {count} in the ThumbnailPattern object. Wherever you specify {count} , Elastic Transcoder adds a five-digit sequence number (beginning with 00001 ) to thumbnail file names. The number indicates where a given thumbnail appears in the sequence of thumbnails for a transcoded file. Important: If you specify a literal value and/or {resolution} but you omit {count} , Elastic Transcoder returns a validation error and does not create the job. * Literal values (Optional) : You can specify literal values anywhere in the ThumbnailPattern object. For example, you can include them as a file name prefix or as a delimiter between {resolution} and {count} . * {resolution} (Optional) : If you want Elastic Transcoder to include the resolution in the file name, include {resolution} in the ThumbnailPattern object. When creating thumbnails, Elastic Transcoder automatically saves the files in the format (.jpg or .png) that appears in the preset that you specified in the PresetID value of CreateJobOutput . Elastic Transcoder also appends the applicable file name extension.
  • cjoCaptions - You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another. All captions must be in UTF-8. Elastic Transcoder supports two types of captions: * Embedded: Embedded captions are included in the same file as the audio and video. Elastic Transcoder supports only one embedded caption per language, to a maximum of 300 embedded captions per file. Valid input values include: CEA-608 (EIA-608 , first non-empty channel only), CEA-708 (EIA-708 , first non-empty channel only), and mov-text Valid outputs include: mov-text Elastic Transcoder supports a maximum of one embedded format per output. * Sidecar: Sidecar captions are kept in a separate metadata file from the audio and video data. Sidecar captions require a player that is capable of understanding the relationship between the video file and the sidecar file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar captions per file. Valid input values include: dfxp (first div element only), ebu-tt , scc , smpt , srt , ttml (first div element only), and webvtt Valid outputs include: dfxp (first div element only), scc , srt , and webvtt . If you want ttml or smpte-tt compatible captions, specify dfxp as your output format. Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does not preserve text formatting (for example, italics) during the transcoding process. To remove captions or leave the captions empty, set Captions to null. To pass through existing captions unchanged, set the MergePolicy to MergeRetain , and pass in a null CaptionSources array. For more information on embedded files, see the Subtitles Wikipedia page. For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file Wikipedia pages.
  • cjoPresetId - The Id of the preset to use for this job. The preset determines the audio, video, and thumbnail settings that Elastic Transcoder uses for transcoding.
  • cjoComposition - You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.
  • cjoAlbumArt - Information about the album art that you want Elastic Transcoder to add to the file during transcoding. You can specify up to twenty album artworks for each output. Settings for each artwork must be defined in the job for the current output.
  • cjoWatermarks - Information about the watermarks that you want Elastic Transcoder to add to the video during transcoding. You can specify up to four watermarks for each output. Settings for each watermark must be defined in the preset for the current output.
  • cjoEncryption - You can specify encryption settings for any output files that you want to use for a transcoding job. This includes the output file and any watermarks, thumbnails, album art, or captions that you want to use. You must specify encryption settings for each file individually.
  • cjoKey - The name to assign to the transcoded file. Elastic Transcoder saves the file in the Amazon S3 bucket specified by the OutputBucket object in the pipeline that is specified by the pipeline ID. If a file with the specified name already exists in the output bucket, the job fails.
  • cjoSegmentDuration - Important: (Outputs in Fragmented MP4 or MPEG-TS format only.) If you specify a preset in PresetId for which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), SegmentDuration is the target maximum duration of each segment in seconds. For HLSv3 format playlists, each media segment is stored in a separate .ts file. For HLSv4 and Smooth playlists, all media segments for an output are stored in a single file. Each segment is approximately the length of the SegmentDuration , though individual segments might be shorter or longer. The range of valid values is 1 to 60 seconds. If the duration of the video is not evenly divisible by SegmentDuration , the duration of the last segment is the remainder of total length/SegmentDuration. Elastic Transcoder creates an output-specific playlist for each HLS output that you specify in OutputKeys. To add an output to the master playlist for this job, include it in the OutputKeys of the associated playlist.
  • cjoThumbnailEncryption - The encryption settings, if any, that you want Elastic Transcoder to apply to your thumbnail.
  • cjoRotate - The number of degrees clockwise by which you want Elastic Transcoder to rotate the output relative to the input. Enter one of the following values: auto , 0 , 90 , 180 , 270 . The value auto generally works only if the file that you're transcoding contains rotation metadata.

cjoThumbnailPattern :: Lens' CreateJobOutput (Maybe Text) Source #

Whether you want Elastic Transcoder to create thumbnails for your videos and, if so, how you want Elastic Transcoder to name the files. If you don't want Elastic Transcoder to create thumbnails, specify "". If you do want Elastic Transcoder to create thumbnails, specify the information that you want to include in the file name for each thumbnail. You can specify the following values in any sequence: * {count} (Required) : If you want to create thumbnails, you must include {count} in the ThumbnailPattern object. Wherever you specify {count} , Elastic Transcoder adds a five-digit sequence number (beginning with 00001 ) to thumbnail file names. The number indicates where a given thumbnail appears in the sequence of thumbnails for a transcoded file. Important: If you specify a literal value and/or {resolution} but you omit {count} , Elastic Transcoder returns a validation error and does not create the job. * Literal values (Optional) : You can specify literal values anywhere in the ThumbnailPattern object. For example, you can include them as a file name prefix or as a delimiter between {resolution} and {count} . * {resolution} (Optional) : If you want Elastic Transcoder to include the resolution in the file name, include {resolution} in the ThumbnailPattern object. When creating thumbnails, Elastic Transcoder automatically saves the files in the format (.jpg or .png) that appears in the preset that you specified in the PresetID value of CreateJobOutput . Elastic Transcoder also appends the applicable file name extension.

cjoCaptions :: Lens' CreateJobOutput (Maybe Captions) Source #

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another. All captions must be in UTF-8. Elastic Transcoder supports two types of captions: * Embedded: Embedded captions are included in the same file as the audio and video. Elastic Transcoder supports only one embedded caption per language, to a maximum of 300 embedded captions per file. Valid input values include: CEA-608 (EIA-608 , first non-empty channel only), CEA-708 (EIA-708 , first non-empty channel only), and mov-text Valid outputs include: mov-text Elastic Transcoder supports a maximum of one embedded format per output. * Sidecar: Sidecar captions are kept in a separate metadata file from the audio and video data. Sidecar captions require a player that is capable of understanding the relationship between the video file and the sidecar file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar captions per file. Valid input values include: dfxp (first div element only), ebu-tt , scc , smpt , srt , ttml (first div element only), and webvtt Valid outputs include: dfxp (first div element only), scc , srt , and webvtt . If you want ttml or smpte-tt compatible captions, specify dfxp as your output format. Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does not preserve text formatting (for example, italics) during the transcoding process. To remove captions or leave the captions empty, set Captions to null. To pass through existing captions unchanged, set the MergePolicy to MergeRetain , and pass in a null CaptionSources array. For more information on embedded files, see the Subtitles Wikipedia page. For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file Wikipedia pages.

cjoPresetId :: Lens' CreateJobOutput (Maybe Text) Source #

The Id of the preset to use for this job. The preset determines the audio, video, and thumbnail settings that Elastic Transcoder uses for transcoding.

cjoComposition :: Lens' CreateJobOutput [Clip] Source #

You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

cjoAlbumArt :: Lens' CreateJobOutput (Maybe JobAlbumArt) Source #

Information about the album art that you want Elastic Transcoder to add to the file during transcoding. You can specify up to twenty album artworks for each output. Settings for each artwork must be defined in the job for the current output.

cjoWatermarks :: Lens' CreateJobOutput [JobWatermark] Source #

Information about the watermarks that you want Elastic Transcoder to add to the video during transcoding. You can specify up to four watermarks for each output. Settings for each watermark must be defined in the preset for the current output.

cjoEncryption :: Lens' CreateJobOutput (Maybe Encryption) Source #

You can specify encryption settings for any output files that you want to use for a transcoding job. This includes the output file and any watermarks, thumbnails, album art, or captions that you want to use. You must specify encryption settings for each file individually.

cjoKey :: Lens' CreateJobOutput (Maybe Text) Source #

The name to assign to the transcoded file. Elastic Transcoder saves the file in the Amazon S3 bucket specified by the OutputBucket object in the pipeline that is specified by the pipeline ID. If a file with the specified name already exists in the output bucket, the job fails.

cjoSegmentDuration :: Lens' CreateJobOutput (Maybe Text) Source #

Important: (Outputs in Fragmented MP4 or MPEG-TS format only.) If you specify a preset in PresetId for which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), SegmentDuration is the target maximum duration of each segment in seconds. For HLSv3 format playlists, each media segment is stored in a separate .ts file. For HLSv4 and Smooth playlists, all media segments for an output are stored in a single file. Each segment is approximately the length of the SegmentDuration , though individual segments might be shorter or longer. The range of valid values is 1 to 60 seconds. If the duration of the video is not evenly divisible by SegmentDuration , the duration of the last segment is the remainder of total length/SegmentDuration. Elastic Transcoder creates an output-specific playlist for each HLS output that you specify in OutputKeys. To add an output to the master playlist for this job, include it in the OutputKeys of the associated playlist.

cjoThumbnailEncryption :: Lens' CreateJobOutput (Maybe Encryption) Source #

The encryption settings, if any, that you want Elastic Transcoder to apply to your thumbnail.

cjoRotate :: Lens' CreateJobOutput (Maybe Text) Source #

The number of degrees clockwise by which you want Elastic Transcoder to rotate the output relative to the input. Enter one of the following values: auto , 0 , 90 , 180 , 270 . The value auto generally works only if the file that you're transcoding contains rotation metadata.
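
As a quick illustration, the createJobOutput smart constructor composes with the lenses above using the usual lens operators. The following sketch assumes the operators from Control.Lens; the key, preset ID, and thumbnail pattern are placeholder values only.

{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (?~))
import Network.AWS.ElasticTranscoder.Types

-- An output that names the transcoded file, selects a preset, requests
-- thumbnails, and relies on rotation metadata in the source. All field
-- values here are placeholders.
exampleOutput :: CreateJobOutput
exampleOutput = createJobOutput
  & cjoKey              ?~ "recipes/lasagna.mp4"
  & cjoPresetId         ?~ "1351620000001-000010"
  & cjoThumbnailPattern ?~ "recipes/lasagna-{count}"
  & cjoRotate           ?~ "auto"

Fields that are never set remain Nothing and are omitted from the request.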

CreateJobPlaylist

data CreateJobPlaylist Source #

Information about the master playlist.

See: createJobPlaylist smart constructor.

Instances

Eq CreateJobPlaylist Source # 
Data CreateJobPlaylist Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> CreateJobPlaylist -> c CreateJobPlaylist #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c CreateJobPlaylist #

toConstr :: CreateJobPlaylist -> Constr #

dataTypeOf :: CreateJobPlaylist -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c CreateJobPlaylist) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c CreateJobPlaylist) #

gmapT :: (forall b. Data b => b -> b) -> CreateJobPlaylist -> CreateJobPlaylist #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> CreateJobPlaylist -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> CreateJobPlaylist -> r #

gmapQ :: (forall d. Data d => d -> u) -> CreateJobPlaylist -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> CreateJobPlaylist -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> CreateJobPlaylist -> m CreateJobPlaylist #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> CreateJobPlaylist -> m CreateJobPlaylist #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> CreateJobPlaylist -> m CreateJobPlaylist #

Read CreateJobPlaylist Source # 
Show CreateJobPlaylist Source # 
Generic CreateJobPlaylist Source # 
Hashable CreateJobPlaylist Source # 
ToJSON CreateJobPlaylist Source # 
NFData CreateJobPlaylist Source # 

Methods

rnf :: CreateJobPlaylist -> () #

type Rep CreateJobPlaylist Source # 
type Rep CreateJobPlaylist = D1 * (MetaData "CreateJobPlaylist" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "CreateJobPlaylist'" PrefixI True) ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_cjpPlayReadyDrm") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe PlayReadyDrm))) (S1 * (MetaSel (Just Symbol "_cjpFormat") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text)))) ((:*:) * (S1 * (MetaSel (Just Symbol "_cjpOutputKeys") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe [Text]))) ((:*:) * (S1 * (MetaSel (Just Symbol "_cjpName") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_cjpHlsContentProtection") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe HlsContentProtection)))))))

createJobPlaylist :: CreateJobPlaylist Source #

Creates a value of CreateJobPlaylist with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • cjpPlayReadyDrm - The DRM settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.
  • cjpFormat - The format of the output playlist. Valid formats include HLSv3 , HLSv4 , and Smooth .
  • cjpOutputKeys - For each output in this job that you want to include in a master playlist, the value of the Outputs:Key object. * If your output is not HLS or does not have a segment duration set, the name of the output file is a concatenation of OutputKeyPrefix and Outputs:Key : OutputKeyPrefixOutputs:Key * If your output is HLSv3 and has a segment duration set, or is not included in a playlist, Elastic Transcoder creates an output playlist file with a file extension of .m3u8 , and a series of .ts files that include a five-digit sequential counter beginning with 00000: OutputKeyPrefixOutputs:Key .m3u8 OutputKeyPrefixOutputs:Key 00000.ts * If your output is HLSv4 , has a segment duration set, and is included in an HLSv4 playlist, Elastic Transcoder creates an output playlist file with a file extension of _v4.m3u8 . If the output is video, Elastic Transcoder also creates an output file with an extension of _iframe.m3u8 : OutputKeyPrefixOutputs:Key _v4.m3u8 OutputKeyPrefixOutputs:Key _iframe.m3u8 OutputKeyPrefixOutputs:Key .ts Elastic Transcoder automatically appends the relevant file extension to the file name. If you include a file extension in Output Key, the file name will have two extensions. If you include more than one output in a playlist, any segment duration settings, clip settings, or caption settings must be the same for all outputs in the playlist. For Smooth playlists, the Audio:Profile , Video:Profile , and Video:FrameRate to Video:KeyframesMaxDist ratio must be the same for all outputs.
  • cjpName - The name that you want Elastic Transcoder to assign to the master playlist, for example, nyc-vacation.m3u8. If the name includes a / character, the section of the name before the last / must be identical for all Name objects. If you create more than one master playlist, the values of all Name objects must be unique.
  • cjpHlsContentProtection - The HLS content protection settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

cjpPlayReadyDrm :: Lens' CreateJobPlaylist (Maybe PlayReadyDrm) Source #

The DRM settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

cjpFormat :: Lens' CreateJobPlaylist (Maybe Text) Source #

The format of the output playlist. Valid formats include HLSv3 , HLSv4 , and Smooth .

cjpOutputKeys :: Lens' CreateJobPlaylist [Text] Source #

For each output in this job that you want to include in a master playlist, the value of the Outputs:Key object. * If your output is not HLS or does not have a segment duration set, the name of the output file is a concatenation of OutputKeyPrefix and Outputs:Key : OutputKeyPrefixOutputs:Key * If your output is HLSv3 and has a segment duration set, or is not included in a playlist, Elastic Transcoder creates an output playlist file with a file extension of .m3u8 , and a series of .ts files that include a five-digit sequential counter beginning with 00000: OutputKeyPrefixOutputs:Key .m3u8 OutputKeyPrefixOutputs:Key 00000.ts * If your output is HLSv4 , has a segment duration set, and is included in an HLSv4 playlist, Elastic Transcoder creates an output playlist file with a file extension of _v4.m3u8 . If the output is video, Elastic Transcoder also creates an output file with an extension of _iframe.m3u8 : OutputKeyPrefixOutputs:Key _v4.m3u8 OutputKeyPrefixOutputs:Key _iframe.m3u8 OutputKeyPrefixOutputs:Key .ts Elastic Transcoder automatically appends the relevant file extension to the file name. If you include a file extension in Output Key, the file name will have two extensions. If you include more than one output in a playlist, any segment duration settings, clip settings, or caption settings must be the same for all outputs in the playlist. For Smooth playlists, the Audio:Profile , Video:Profile , and Video:FrameRate to Video:KeyframesMaxDist ratio must be the same for all outputs.

cjpName :: Lens' CreateJobPlaylist (Maybe Text) Source #

The name that you want Elastic Transcoder to assign to the master playlist, for example, nyc-vacation.m3u8. If the name includes a / character, the section of the name before the last / must be identical for all Name objects. If you create more than one master playlist, the values of all Name objects must be unique.

cjpHlsContentProtection :: Lens' CreateJobPlaylist (Maybe HlsContentProtection) Source #

The HLS content protection settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.
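
A minimal sketch of a master playlist built with createJobPlaylist and the lenses above; the playlist name and output keys are placeholders and must match the Outputs:Key values of the job's outputs.

{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (.~), (?~))
import Network.AWS.ElasticTranscoder.Types

-- An HLSv4 master playlist referencing two outputs of the same job.
examplePlaylist :: CreateJobPlaylist
examplePlaylist = createJobPlaylist
  & cjpName       ?~ "nyc-vacation"
  & cjpFormat     ?~ "HLSv4"
  & cjpOutputKeys .~ ["hls-1000k", "hls-2400k"]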

DetectedProperties

data DetectedProperties Source #

The detected properties of the input file. Elastic Transcoder identifies these values from the input file.

See: detectedProperties smart constructor.

Instances

Eq DetectedProperties Source # 
Data DetectedProperties Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> DetectedProperties -> c DetectedProperties #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c DetectedProperties #

toConstr :: DetectedProperties -> Constr #

dataTypeOf :: DetectedProperties -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c DetectedProperties) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c DetectedProperties) #

gmapT :: (forall b. Data b => b -> b) -> DetectedProperties -> DetectedProperties #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> DetectedProperties -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> DetectedProperties -> r #

gmapQ :: (forall d. Data d => d -> u) -> DetectedProperties -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> DetectedProperties -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> DetectedProperties -> m DetectedProperties #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> DetectedProperties -> m DetectedProperties #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> DetectedProperties -> m DetectedProperties #

Read DetectedProperties Source # 
Show DetectedProperties Source # 
Generic DetectedProperties Source # 
Hashable DetectedProperties Source # 
ToJSON DetectedProperties Source # 
FromJSON DetectedProperties Source # 
NFData DetectedProperties Source # 

Methods

rnf :: DetectedProperties -> () #

type Rep DetectedProperties Source # 
type Rep DetectedProperties = D1 * (MetaData "DetectedProperties" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "DetectedProperties'" PrefixI True) ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_dpHeight") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Int))) (S1 * (MetaSel (Just Symbol "_dpFrameRate") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text)))) ((:*:) * (S1 * (MetaSel (Just Symbol "_dpFileSize") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Integer))) ((:*:) * (S1 * (MetaSel (Just Symbol "_dpWidth") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Int))) (S1 * (MetaSel (Just Symbol "_dpDurationMillis") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Integer)))))))

detectedProperties :: DetectedProperties Source #

Creates a value of DetectedProperties with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • dpHeight - The detected height of the input file, in pixels.
  • dpFrameRate - The detected frame rate of the input file, in frames per second.
  • dpFileSize - The detected file size of the input file, in bytes.
  • dpWidth - The detected width of the input file, in pixels.
  • dpDurationMillis - The detected duration of the input file, in milliseconds.

dpHeight :: Lens' DetectedProperties (Maybe Int) Source #

The detected height of the input file, in pixels.

dpFrameRate :: Lens' DetectedProperties (Maybe Text) Source #

The detected frame rate of the input file, in frames per second.

dpFileSize :: Lens' DetectedProperties (Maybe Integer) Source #

The detected file size of the input file, in bytes.

dpWidth :: Lens' DetectedProperties (Maybe Int) Source #

The detected width of the input file, in pixels.

dpDurationMillis :: Lens' DetectedProperties (Maybe Integer) Source #

The detected duration of the input file, in milliseconds.
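
DetectedProperties values are reported by the service, so these lenses are mostly used for reading. A small sketch:

import Control.Lens ((^.))
import Network.AWS.ElasticTranscoder.Types

-- Summarise the detected dimensions and duration of an input file.
-- Every field is a Maybe, because the service may not report all properties.
describeInput :: DetectedProperties -> String
describeInput dp = concat
  [ "width: ",    show (dp ^. dpWidth)
  , ", height: ", show (dp ^. dpHeight)
  , ", ms: ",     show (dp ^. dpDurationMillis)
  ]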

Encryption

data Encryption Source #

The encryption settings, if any, that are used for decrypting your input files or encrypting your output files. If your input file is encrypted, you must specify the mode that Elastic Transcoder uses to decrypt your file, otherwise you must specify the mode you want Elastic Transcoder to use to encrypt your output files.

See: encryption smart constructor.

Instances

Eq Encryption Source # 
Data Encryption Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> Encryption -> c Encryption #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c Encryption #

toConstr :: Encryption -> Constr #

dataTypeOf :: Encryption -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c Encryption) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c Encryption) #

gmapT :: (forall b. Data b => b -> b) -> Encryption -> Encryption #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> Encryption -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> Encryption -> r #

gmapQ :: (forall d. Data d => d -> u) -> Encryption -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> Encryption -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> Encryption -> m Encryption #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> Encryption -> m Encryption #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> Encryption -> m Encryption #

Read Encryption Source # 
Show Encryption Source # 
Generic Encryption Source # 

Associated Types

type Rep Encryption :: * -> * #

Hashable Encryption Source # 
ToJSON Encryption Source # 
FromJSON Encryption Source # 
NFData Encryption Source # 

Methods

rnf :: Encryption -> () #

type Rep Encryption Source # 
type Rep Encryption = D1 * (MetaData "Encryption" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "Encryption'" PrefixI True) ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_eMode") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_eKeyMD5") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text)))) ((:*:) * (S1 * (MetaSel (Just Symbol "_eKey") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_eInitializationVector") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))))))

encryption :: Encryption Source #

Creates a value of Encryption with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • eMode - The specific server-side encryption mode that you want Elastic Transcoder to use when decrypting your input files or encrypting your output files. Elastic Transcoder supports the following options: * S3: Amazon S3 creates and manages the keys used for encrypting your files. * S3-AWS-KMS: Amazon S3 calls the Amazon Key Management Service, which creates and manages the keys that are used for encrypting your files. If you specify S3-AWS-KMS and you don't want to use the default key, you must add the AWS-KMS key that you want to use to your pipeline. * AES-CBC-PKCS7: A padded cipher-block mode of operation originally used for HLS files. * AES-CTR: AES Counter Mode. * AES-GCM: AES Galois Counter Mode, a mode of operation that is an authenticated encryption format, meaning that a file, key, or initialization vector that has been tampered with fails the decryption process. For all three AES options, you must provide the following settings, which must be base64-encoded: * Key * Key MD5 * Initialization Vector Important: For the AES modes, your private encryption keys and your unencrypted data are never stored by AWS; therefore, it is important that you safely manage your encryption keys. If you lose them, you won't be able to decrypt your data.
  • eKeyMD5 - The MD5 digest of the key that you used to encrypt your input file, or that you want Elastic Transcoder to use to encrypt your output file. Elastic Transcoder uses the key digest as a checksum to make sure your key was not corrupted in transit. The key MD5 must be base64-encoded, and it must be exactly 16 bytes long before being base64-encoded.
  • eKey - The data encryption key that you want Elastic Transcoder to use to encrypt your output file, or that was used to encrypt your input file. The key must be base64-encoded and it must be one of the following bit lengths before being base64-encoded: 128 , 192 , or 256 . The key must also be encrypted by using the Amazon Key Management Service.
  • eInitializationVector - The series of random bits created by a random bit generator, unique for every encryption operation, that you used to encrypt your input files or that you want Elastic Transcoder to use to encrypt your output files. The initialization vector must be base64-encoded, and it must be exactly 16 bytes long before being base64-encoded.

eMode :: Lens' Encryption (Maybe Text) Source #

The specific server-side encryption mode that you want Elastic Transcoder to use when decrypting your input files or encrypting your output files. Elastic Transcoder supports the following options: * S3: Amazon S3 creates and manages the keys used for encrypting your files. * S3-AWS-KMS: Amazon S3 calls the Amazon Key Management Service, which creates and manages the keys that are used for encrypting your files. If you specify S3-AWS-KMS and you don't want to use the default key, you must add the AWS-KMS key that you want to use to your pipeline. * AES-CBC-PKCS7: A padded cipher-block mode of operation originally used for HLS files. * AES-CTR: AES Counter Mode. * AES-GCM: AES Galois Counter Mode, a mode of operation that is an authenticated encryption format, meaning that a file, key, or initialization vector that has been tampered with fails the decryption process. For all three AES options, you must provide the following settings, which must be base64-encoded: * Key * Key MD5 * Initialization Vector Important: For the AES modes, your private encryption keys and your unencrypted data are never stored by AWS; therefore, it is important that you safely manage your encryption keys. If you lose them, you won't be able to decrypt your data.

eKeyMD5 :: Lens' Encryption (Maybe Text) Source #

The MD5 digest of the key that you used to encrypt your input file, or that you want Elastic Transcoder to use to encrypt your output file. Elastic Transcoder uses the key digest as a checksum to make sure your key was not corrupted in transit. The key MD5 must be base64-encoded, and it must be exactly 16 bytes long before being base64-encoded.

eKey :: Lens' Encryption (Maybe Text) Source #

The data encryption key that you want Elastic Transcoder to use to encrypt your output file, or that was used to encrypt your input file. The key must be base64-encoded and it must be one of the following bit lengths before being base64-encoded: 128 , 192 , or 256 . The key must also be encrypted by using the Amazon Key Management Service.

eInitializationVector :: Lens' Encryption (Maybe Text) Source #

The series of random bits created by a random bit generator, unique for every encryption operation, that you used to encrypt your input files or that you want Elastic Transcoder to use to encrypt your output files. The initialization vector must be base64-encoded, and it must be exactly 16 bytes long before being base64-encoded.
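
A hedged sketch of output encryption with a caller-supplied AES key. The mode string and the base64 values below are placeholders, not working settings.

{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (?~))
import Network.AWS.ElasticTranscoder.Types

-- Output encryption with a caller-supplied key. The key must be
-- KMS-encrypted and base64-encoded as described above.
exampleEncryption :: Encryption
exampleEncryption = encryption
  & eMode                 ?~ "aes-cbc-pkcs7"
  & eKey                  ?~ "<base64-encoded, KMS-encrypted key>"
  & eKeyMD5               ?~ "<base64-encoded MD5 digest of the key>"
  & eInitializationVector ?~ "<base64-encoded 16-byte IV>"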

HlsContentProtection

data HlsContentProtection Source #

The HLS content protection settings, if any, that you want Elastic Transcoder to apply to your output files.

See: hlsContentProtection smart constructor.

Instances

Eq HlsContentProtection Source # 
Data HlsContentProtection Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> HlsContentProtection -> c HlsContentProtection #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c HlsContentProtection #

toConstr :: HlsContentProtection -> Constr #

dataTypeOf :: HlsContentProtection -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c HlsContentProtection) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c HlsContentProtection) #

gmapT :: (forall b. Data b => b -> b) -> HlsContentProtection -> HlsContentProtection #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> HlsContentProtection -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> HlsContentProtection -> r #

gmapQ :: (forall d. Data d => d -> u) -> HlsContentProtection -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> HlsContentProtection -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> HlsContentProtection -> m HlsContentProtection #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> HlsContentProtection -> m HlsContentProtection #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> HlsContentProtection -> m HlsContentProtection #

Read HlsContentProtection Source # 
Show HlsContentProtection Source # 
Generic HlsContentProtection Source # 
Hashable HlsContentProtection Source # 
ToJSON HlsContentProtection Source # 
FromJSON HlsContentProtection Source # 
NFData HlsContentProtection Source # 

Methods

rnf :: HlsContentProtection -> () #

type Rep HlsContentProtection Source # 
type Rep HlsContentProtection = D1 * (MetaData "HlsContentProtection" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "HlsContentProtection'" PrefixI True) ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_hcpKeyMD5") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) ((:*:) * (S1 * (MetaSel (Just Symbol "_hcpKeyStoragePolicy") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_hcpKey") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))))) ((:*:) * (S1 * (MetaSel (Just Symbol "_hcpMethod") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) ((:*:) * (S1 * (MetaSel (Just Symbol "_hcpInitializationVector") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_hcpLicenseAcquisitionURL") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text)))))))

hlsContentProtection :: HlsContentProtection Source #

Creates a value of HlsContentProtection with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • hcpKeyMD5 - If Elastic Transcoder is generating your key for you, you must leave this field blank. The MD5 digest of the key that you want Elastic Transcoder to use to encrypt your output file, and that you want Elastic Transcoder to use as a checksum to make sure your key was not corrupted in transit. The key MD5 must be base64-encoded, and it must be exactly 16 bytes before being base64-encoded.
  • hcpKeyStoragePolicy - Specify whether you want Elastic Transcoder to write your HLS license key to an Amazon S3 bucket. If you choose WithVariantPlaylists , LicenseAcquisitionUrl must be left blank and Elastic Transcoder writes your data key into the same bucket as the associated playlist.
  • hcpKey - If you want Elastic Transcoder to generate a key for you, leave this field blank. If you choose to supply your own key, you must encrypt the key by using AWS KMS. The key must be base64-encoded, and it must be one of the following bit lengths before being base64-encoded: 128 , 192 , or 256 .
  • hcpMethod - The content protection method for your output. The only valid value is: aes-128 . This value is written into the method attribute of the EXT-X-KEY metadata tag in the output playlist.
  • hcpInitializationVector - If Elastic Transcoder is generating your key for you, you must leave this field blank. The series of random bits created by a random bit generator, unique for every encryption operation, that you want Elastic Transcoder to use to encrypt your output files. The initialization vector must be base64-encoded, and it must be exactly 16 bytes before being base64-encoded.
  • hcpLicenseAcquisitionURL - The location of the license key required to decrypt your HLS playlist. The URL must be an absolute path, and is referenced in the URI attribute of the EXT-X-KEY metadata tag in the playlist file.

hcpKeyMD5 :: Lens' HlsContentProtection (Maybe Text) Source #

If Elastic Transcoder is generating your key for you, you must leave this field blank. The MD5 digest of the key that you want Elastic Transcoder to use to encrypt your output file, and that you want Elastic Transcoder to use as a checksum to make sure your key was not corrupted in transit. The key MD5 must be base64-encoded, and it must be exactly 16 bytes before being base64-encoded.

hcpKeyStoragePolicy :: Lens' HlsContentProtection (Maybe Text) Source #

Specify whether you want Elastic Transcoder to write your HLS license key to an Amazon S3 bucket. If you choose WithVariantPlaylists , LicenseAcquisitionUrl must be left blank and Elastic Transcoder writes your data key into the same bucket as the associated playlist.

hcpKey :: Lens' HlsContentProtection (Maybe Text) Source #

If you want Elastic Transcoder to generate a key for you, leave this field blank. If you choose to supply your own key, you must encrypt the key by using AWS KMS. The key must be base64-encoded, and it must be one of the following bit lengths before being base64-encoded: 128 , 192 , or 256 .

hcpMethod :: Lens' HlsContentProtection (Maybe Text) Source #

The content protection method for your output. The only valid value is: aes-128 . This value is written into the method attribute of the EXT-X-KEY metadata tag in the output playlist.

hcpInitializationVector :: Lens' HlsContentProtection (Maybe Text) Source #

If Elastic Transcoder is generating your key for you, you must leave this field blank. The series of random bits created by a random bit generator, unique for every encryption operation, that you want Elastic Transcoder to use to encrypt your output files. The initialization vector must be base64-encoded, and it must be exactly 16 bytes before being base64-encoded.

hcpLicenseAcquisitionURL :: Lens' HlsContentProtection (Maybe Text) Source #

The location of the license key required to decrypt your HLS playlist. The URL must be an absolute path, and is referenced in the URI attribute of the EXT-X-KEY metadata tag in the playlist file.
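
For example, a sketch in which Elastic Transcoder generates and stores the data key itself, so the key, key MD5, and initialization vector are left blank:

{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (?~))
import Network.AWS.ElasticTranscoder.Types

-- AES-128 HLS content protection with the data key written next to the
-- variant playlists, as described under hcpKeyStoragePolicy above.
exampleHlsProtection :: HlsContentProtection
exampleHlsProtection = hlsContentProtection
  & hcpMethod           ?~ "aes-128"
  & hcpKeyStoragePolicy ?~ "WithVariantPlaylists"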

InputCaptions

data InputCaptions Source #

The captions to be created, if any.

See: inputCaptions smart constructor.

Instances

Eq InputCaptions Source # 
Data InputCaptions Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> InputCaptions -> c InputCaptions #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c InputCaptions #

toConstr :: InputCaptions -> Constr #

dataTypeOf :: InputCaptions -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c InputCaptions) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c InputCaptions) #

gmapT :: (forall b. Data b => b -> b) -> InputCaptions -> InputCaptions #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> InputCaptions -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> InputCaptions -> r #

gmapQ :: (forall d. Data d => d -> u) -> InputCaptions -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> InputCaptions -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> InputCaptions -> m InputCaptions #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> InputCaptions -> m InputCaptions #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> InputCaptions -> m InputCaptions #

Read InputCaptions Source # 
Show InputCaptions Source # 
Generic InputCaptions Source # 

Associated Types

type Rep InputCaptions :: * -> * #

Hashable InputCaptions Source # 
ToJSON InputCaptions Source # 
FromJSON InputCaptions Source # 
NFData InputCaptions Source # 

Methods

rnf :: InputCaptions -> () #

type Rep InputCaptions Source # 
type Rep InputCaptions = D1 * (MetaData "InputCaptions" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "InputCaptions'" PrefixI True) ((:*:) * (S1 * (MetaSel (Just Symbol "_icMergePolicy") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_icCaptionSources") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe [CaptionSource])))))

inputCaptions :: InputCaptions Source #

Creates a value of InputCaptions with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • icMergePolicy - A policy that determines how Elastic Transcoder handles the existence of multiple captions. * MergeOverride: Elastic Transcoder transcodes both embedded and sidecar captions into outputs. If captions for a language are embedded in the input file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the embedded captions for that language. * MergeRetain: Elastic Transcoder transcodes both embedded and sidecar captions into outputs. If captions for a language are embedded in the input file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the sidecar captions for that language. If CaptionSources is empty, Elastic Transcoder omits all sidecar captions from the output files. * Override: Elastic Transcoder transcodes only the sidecar captions that you specify in CaptionSources . MergePolicy cannot be null.
  • icCaptionSources - Source files for the input sidecar captions used during the transcoding process. To omit all sidecar captions, leave CaptionSources blank.

icMergePolicy :: Lens' InputCaptions (Maybe Text) Source #

A policy that determines how Elastic Transcoder handles the existence of multiple captions. * MergeOverride: Elastic Transcoder transcodes both embedded and sidecar captions into outputs. If captions for a language are embedded in the input file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the embedded captions for that language. * MergeRetain: Elastic Transcoder transcodes both embedded and sidecar captions into outputs. If captions for a language are embedded in the input file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the sidecar captions for that language. If CaptionSources is empty, Elastic Transcoder omits all sidecar captions from the output files. * Override: Elastic Transcoder transcodes only the sidecar captions that you specify in CaptionSources . MergePolicy cannot be null.

icCaptionSources :: Lens' InputCaptions [CaptionSource] Source #

Source files for the input sidecar captions used during the transcoding process. To omit all sidecar captions, leave CaptionSources blank.
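
A minimal sketch that retains captions embedded in the input and supplies no sidecar sources:

{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (.~), (?~))
import Network.AWS.ElasticTranscoder.Types

-- Keep embedded captions; an empty CaptionSources list means no sidecars.
exampleInputCaptions :: InputCaptions
exampleInputCaptions = inputCaptions
  & icMergePolicy    ?~ "MergeRetain"
  & icCaptionSources .~ []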

Job'

data Job' Source #

A section of the response body that provides information about the job that is created.

See: job' smart constructor.

Instances

Eq Job' Source # 

Methods

(==) :: Job' -> Job' -> Bool #

(/=) :: Job' -> Job' -> Bool #

Data Job' Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> Job' -> c Job' #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c Job' #

toConstr :: Job' -> Constr #

dataTypeOf :: Job' -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c Job') #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c Job') #

gmapT :: (forall b. Data b => b -> b) -> Job' -> Job' #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> Job' -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> Job' -> r #

gmapQ :: (forall d. Data d => d -> u) -> Job' -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> Job' -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> Job' -> m Job' #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> Job' -> m Job' #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> Job' -> m Job' #

Read Job' Source # 
Show Job' Source # 

Methods

showsPrec :: Int -> Job' -> ShowS #

show :: Job' -> String #

showList :: [Job'] -> ShowS #

Generic Job' Source # 

Associated Types

type Rep Job' :: * -> * #

Methods

from :: Job' -> Rep Job' x #

to :: Rep Job' x -> Job' #

Hashable Job' Source # 

Methods

hashWithSalt :: Int -> Job' -> Int #

hash :: Job' -> Int #

FromJSON Job' Source # 
NFData Job' Source # 

Methods

rnf :: Job' -> () #

type Rep Job' Source # 
type Rep Job' = D1 * (MetaData "Job'" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "Job''" PrefixI True) ((:*:) * ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_jStatus") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) ((:*:) * (S1 * (MetaSel (Just Symbol "_jPipelineId") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_jARN") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))))) ((:*:) * (S1 * (MetaSel (Just Symbol "_jInputs") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe [JobInput]))) ((:*:) * (S1 * (MetaSel (Just Symbol "_jInput") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe JobInput))) (S1 * (MetaSel (Just Symbol "_jUserMetadata") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe (Map Text Text))))))) ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_jOutputs") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe [JobOutput]))) ((:*:) * (S1 * (MetaSel (Just Symbol "_jOutput") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe JobOutput))) (S1 * (MetaSel (Just Symbol "_jId") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))))) ((:*:) * (S1 * (MetaSel (Just Symbol "_jPlaylists") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe [Playlist]))) ((:*:) * (S1 * (MetaSel (Just Symbol "_jOutputKeyPrefix") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_jTiming") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Timing))))))))

job' :: Job' Source #

Creates a value of Job' with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • jStatus - The status of the job: Submitted , Progressing , Complete , Canceled , or Error .
  • jPipelineId - The Id of the pipeline that you want Elastic Transcoder to use for transcoding. The pipeline determines several settings, including the Amazon S3 bucket from which Elastic Transcoder gets the files to transcode and the bucket into which Elastic Transcoder puts the transcoded files.
  • jARN - The Amazon Resource Name (ARN) for the job.
  • jInputs - Information about the files that you're transcoding. If you specified multiple files for this job, Elastic Transcoder stitches the files together to make one output.
  • jInput - A section of the request or response body that provides information about the file that is being transcoded.
  • jUserMetadata - User-defined metadata that you want to associate with an Elastic Transcoder job. You specify metadata in key/value pairs, and you can add up to 10 key/value pairs per job. Elastic Transcoder does not guarantee that key/value pairs are returned in the same order in which you specify them. Metadata keys and values must use characters from the following list: * 0-9 * A-Z and a-z * Space * The following symbols: _.:/=+-%@
  • jOutputs - Information about the output files. We recommend that you use the Outputs syntax for all jobs, even when you want Elastic Transcoder to transcode a file into only one format. Do not use both the Outputs and Output syntaxes in the same request. You can create a maximum of 30 outputs per job. If you specify more than one output for a job, Elastic Transcoder creates the files for each output in the order in which you specify them in the job.
  • jOutput - If you specified one output for a job, information about that output. If you specified multiple outputs for a job, the Output object lists information about the first output. This duplicates the information that is listed for the first output in the Outputs object. Important: We recommend that you use Outputs instead. A section of the request or response body that provides information about the transcoded (target) file.
  • jId - The identifier that Elastic Transcoder assigned to the job. You use this value to get settings for the job or to delete the job.
  • jPlaylists - Important: Outputs in Fragmented MP4 or MPEG-TS format only. If you specify a preset in PresetId for which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), Playlists contains information about the master playlists that you want Elastic Transcoder to create. The maximum number of master playlists in a job is 30.
  • jOutputKeyPrefix - The value, if any, that you want Elastic Transcoder to prepend to the names of all files that this job creates, including output files, thumbnails, and playlists. We recommend that you add a / or some other delimiter to the end of the OutputKeyPrefix .
  • jTiming - Details about the timing of a job.

jStatus :: Lens' Job' (Maybe Text) Source #

The status of the job: Submitted , Progressing , Complete , Canceled , or Error .

jPipelineId :: Lens' Job' (Maybe Text) Source #

The Id of the pipeline that you want Elastic Transcoder to use for transcoding. The pipeline determines several settings, including the Amazon S3 bucket from which Elastic Transcoder gets the files to transcode and the bucket into which Elastic Transcoder puts the transcoded files.

jARN :: Lens' Job' (Maybe Text) Source #

The Amazon Resource Name (ARN) for the job.

jInputs :: Lens' Job' [JobInput] Source #

Information about the files that you're transcoding. If you specified multiple files for this job, Elastic Transcoder stitches the files together to make one output.

jInput :: Lens' Job' (Maybe JobInput) Source #

A section of the request or response body that provides information about the file that is being transcoded.

jUserMetadata :: Lens' Job' (HashMap Text Text) Source #

User-defined metadata that you want to associate with an Elastic Transcoder job. You specify metadata in key/value pairs, and you can add up to 10 key/value pairs per job. Elastic Transcoder does not guarantee that key/value pairs are returned in the same order in which you specify them. Metadata keys and values must use characters from the following list: * 0-9 * A-Z and a-z * Space * The following symbols: _.:/=+-%@

jOutputs :: Lens' Job' [JobOutput] Source #

Information about the output files. We recommend that you use the Outputs syntax for all jobs, even when you want Elastic Transcoder to transcode a file into only one format. Do not use both the Outputs and Output syntaxes in the same request. You can create a maximum of 30 outputs per job. If you specify more than one output for a job, Elastic Transcoder creates the files for each output in the order in which you specify them in the job.

jOutput :: Lens' Job' (Maybe JobOutput) Source #

If you specified one output for a job, information about that output. If you specified multiple outputs for a job, the Output object lists information about the first output. This duplicates the information that is listed for the first output in the Outputs object. Important: We recommend that you use Outputs instead. A section of the request or response body that provides information about the transcoded (target) file.

jId :: Lens' Job' (Maybe Text) Source #

The identifier that Elastic Transcoder assigned to the job. You use this value to get settings for the job or to delete the job.

jPlaylists :: Lens' Job' [Playlist] Source #

Important: Outputs in Fragmented MP4 or MPEG-TS format only. If you specify a preset in PresetId for which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), Playlists contains information about the master playlists that you want Elastic Transcoder to create. The maximum number of master playlists in a job is 30.

jOutputKeyPrefix :: Lens' Job' (Maybe Text) Source #

The value, if any, that you want Elastic Transcoder to prepend to the names of all files that this job creates, including output files, thumbnails, and playlists. We recommend that you add a / or some other delimiter to the end of the OutputKeyPrefix .

jTiming :: Lens' Job' (Maybe Timing) Source #

Details about the timing of a job.
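
Job' values come back in responses, so the lenses are typically read with (^.). A small sketch:

import Control.Lens ((^.))
import Network.AWS.ElasticTranscoder.Types

-- Report a job's identifier and status; the Job' value is assumed to come
-- from a CreateJob or ReadJob response.
jobSummary :: Job' -> String
jobSummary j = show (j ^. jId) ++ ": " ++ show (j ^. jStatus)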

JobAlbumArt

data JobAlbumArt Source #

The .jpg or .png file associated with an audio file.

See: jobAlbumArt smart constructor.

Instances

Eq JobAlbumArt Source # 
Data JobAlbumArt Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> JobAlbumArt -> c JobAlbumArt #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c JobAlbumArt #

toConstr :: JobAlbumArt -> Constr #

dataTypeOf :: JobAlbumArt -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c JobAlbumArt) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c JobAlbumArt) #

gmapT :: (forall b. Data b => b -> b) -> JobAlbumArt -> JobAlbumArt #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> JobAlbumArt -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> JobAlbumArt -> r #

gmapQ :: (forall d. Data d => d -> u) -> JobAlbumArt -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> JobAlbumArt -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> JobAlbumArt -> m JobAlbumArt #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> JobAlbumArt -> m JobAlbumArt #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> JobAlbumArt -> m JobAlbumArt #

Read JobAlbumArt Source # 
Show JobAlbumArt Source # 
Generic JobAlbumArt Source # 

Associated Types

type Rep JobAlbumArt :: * -> * #

Hashable JobAlbumArt Source # 
ToJSON JobAlbumArt Source # 
FromJSON JobAlbumArt Source # 
NFData JobAlbumArt Source # 

Methods

rnf :: JobAlbumArt -> () #

type Rep JobAlbumArt Source # 
type Rep JobAlbumArt = D1 * (MetaData "JobAlbumArt" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "JobAlbumArt'" PrefixI True) ((:*:) * (S1 * (MetaSel (Just Symbol "_jaaMergePolicy") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_jaaArtwork") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe [Artwork])))))

jobAlbumArt :: JobAlbumArt Source #

Creates a value of JobAlbumArt with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • jaaMergePolicy - A policy that determines how Elastic Transcoder handles the existence of multiple album artwork files. * Replace: The specified album art replaces any existing album art. * Prepend: The specified album art is placed in front of any existing album art. * Append: The specified album art is placed after any existing album art. * Fallback: If the original input file contains artwork, Elastic Transcoder uses that artwork for the output. If the original input does not contain artwork, Elastic Transcoder uses the specified album art file.
  • jaaArtwork - The file to be used as album art. There can be multiple artworks associated with an audio file, to a maximum of 20. Valid formats are .jpg and .png

jaaMergePolicy :: Lens' JobAlbumArt (Maybe Text) Source #

A policy that determines how Elastic Transcoder handles the existence of multiple album artwork files. * Replace: The specified album art replaces any existing album art. * Prepend: The specified album art is placed in front of any existing album art. * Append: The specified album art is placed after any existing album art. * Fallback: If the original input file contains artwork, Elastic Transcoder uses that artwork for the output. If the original input does not contain artwork, Elastic Transcoder uses the specified album art file.

jaaArtwork :: Lens' JobAlbumArt [Artwork] Source #

The file to be used as album art. There can be multiple artworks associated with an audio file, to a maximum of 20. Valid formats are .jpg and .png
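
A sketch that replaces any existing album art with a single file. aInputKey is assumed to be the Artwork lens for the source file name, and the file name itself is a placeholder.

{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (.~), (?~))
import Network.AWS.ElasticTranscoder.Types

-- Replace existing album art with one .png from the pipeline's input bucket.
-- aInputKey is assumed here; see the Artwork type for its lenses.
exampleAlbumArt :: JobAlbumArt
exampleAlbumArt = jobAlbumArt
  & jaaMergePolicy ?~ "Replace"
  & jaaArtwork     .~ [artwork & aInputKey ?~ "covers/album-cover.png"]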

JobInput

data JobInput Source #

Information about the file that you're transcoding.

See: jobInput smart constructor.

Instances

Eq JobInput Source # 
Data JobInput Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> JobInput -> c JobInput #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c JobInput #

toConstr :: JobInput -> Constr #

dataTypeOf :: JobInput -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c JobInput) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c JobInput) #

gmapT :: (forall b. Data b => b -> b) -> JobInput -> JobInput #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> JobInput -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> JobInput -> r #

gmapQ :: (forall d. Data d => d -> u) -> JobInput -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> JobInput -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> JobInput -> m JobInput #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> JobInput -> m JobInput #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> JobInput -> m JobInput #

Read JobInput Source # 
Show JobInput Source # 
Generic JobInput Source # 

Associated Types

type Rep JobInput :: * -> * #

Methods

from :: JobInput -> Rep JobInput x #

to :: Rep JobInput x -> JobInput #

Hashable JobInput Source # 

Methods

hashWithSalt :: Int -> JobInput -> Int #

hash :: JobInput -> Int #

ToJSON JobInput Source # 
FromJSON JobInput Source # 
NFData JobInput Source # 

Methods

rnf :: JobInput -> () #

type Rep JobInput Source # 

jobInput :: JobInput Source #

Creates a value of JobInput with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • jiFrameRate - The frame rate of the input file. If you want Elastic Transcoder to automatically detect the frame rate of the input file, specify auto . If you want to specify the frame rate for the input file, enter one of the following values: 10 , 15 , 23.97 , 24 , 25 , 29.97 , 30 , 60 If you specify a value other than auto , Elastic Transcoder disables automatic detection of the frame rate.
  • jiResolution - This value must be auto , which causes Elastic Transcoder to automatically detect the resolution of the input file.
  • jiAspectRatio - The aspect ratio of the input file. If you want Elastic Transcoder to automatically detect the aspect ratio of the input file, specify auto . If you want to specify the aspect ratio for the output file, enter one of the following values: 1:1 , 4:3 , 3:2 , 16:9 If you specify a value other than auto , Elastic Transcoder disables automatic detection of the aspect ratio.
  • jiTimeSpan - Settings for clipping an input. Each input can have different clip settings.
  • jiEncryption - The encryption settings, if any, that are used for decrypting your input files. If your input file is encrypted, you must specify the mode that Elastic Transcoder uses to decrypt your file.
  • jiKey - The name of the file to transcode. Elsewhere in the body of the JSON block is the ID of the pipeline to use for processing the job. The InputBucket object in that pipeline tells Elastic Transcoder which Amazon S3 bucket to get the file from. If the file name includes a prefix, such as cooking/lasagna.mpg , include the prefix in the key. If the file isn't in the specified bucket, Elastic Transcoder returns an error.
  • jiDetectedProperties - The detected properties of the input file.
  • jiContainer - The container type for the input file. If you want Elastic Transcoder to automatically detect the container type of the input file, specify auto . If you want to specify the container type for the input file, enter one of the following values: 3gp , aac , asf , avi , divx , flv , m4a , mkv , mov , mp3 , mp4 , mpeg , mpeg-ps , mpeg-ts , mxf , ogg , vob , wav , webm
  • jiInterlaced - Whether the input file is interlaced. If you want Elastic Transcoder to automatically detect whether the input file is interlaced, specify auto . If you want to specify whether the input file is interlaced, enter one of the following values: true , false If you specify a value other than auto , Elastic Transcoder disables automatic detection of interlacing.
  • jiInputCaptions - You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another. All captions must be in UTF-8. Elastic Transcoder supports two types of captions: * Embedded: Embedded captions are included in the same file as the audio and video. Elastic Transcoder supports only one embedded caption per language, to a maximum of 300 embedded captions per file. Valid input values include: CEA-608 (EIA-608 , first non-empty channel only), CEA-708 (EIA-708 , first non-empty channel only), and mov-text Valid outputs include: mov-text Elastic Transcoder supports a maximum of one embedded format per output. * Sidecar: Sidecar captions are kept in a separate metadata file from the audio and video data. Sidecar captions require a player that is capable of understanding the relationship between the video file and the sidecar file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar captions per file. Valid input values include: dfxp (first div element only), ebu-tt , scc , smpt , srt , ttml (first div element only), and webvtt Valid outputs include: dfxp (first div element only), scc , srt , and webvtt . If you want ttml or smpte-tt compatible captions, specify dfxp as your output format. Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does not preserve text formatting (for example, italics) during the transcoding process. To remove captions or leave the captions empty, set Captions to null. To pass through existing captions unchanged, set the MergePolicy to MergeRetain , and pass in a null CaptionSources array. For more information on embedded files, see the Subtitles Wikipedia page. For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file Wikipedia pages.
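
For example, a request-side JobInput can be built from the jobInput smart constructor and the lenses above; this is a minimal sketch in which the file key is a placeholder and the remaining settings are left to automatic detection.

{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (?~))
import Network.AWS.ElasticTranscoder.Types

-- Transcode cooking/lasagna.mpg, letting Elastic Transcoder detect the
-- frame rate, aspect ratio, and container automatically.
lasagnaInput :: JobInput
lasagnaInput = jobInput
  & jiKey         ?~ "cooking/lasagna.mpg"
  & jiFrameRate   ?~ "auto"
  & jiAspectRatio ?~ "auto"
  & jiContainer   ?~ "auto"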

jiFrameRate :: Lens' JobInput (Maybe Text) Source #

The frame rate of the input file. If you want Elastic Transcoder to automatically detect the frame rate of the input file, specify auto . If you want to specify the frame rate for the input file, enter one of the following values: 10 , 15 , 23.97 , 24 , 25 , 29.97 , 30 , 60 If you specify a value other than auto , Elastic Transcoder disables automatic detection of the frame rate.

jiResolution :: Lens' JobInput (Maybe Text) Source #

This value must be auto , which causes Elastic Transcoder to automatically detect the resolution of the input file.

jiAspectRatio :: Lens' JobInput (Maybe Text) Source #

The aspect ratio of the input file. If you want Elastic Transcoder to automatically detect the aspect ratio of the input file, specify auto . If you want to specify the aspect ratio for the output file, enter one of the following values: 1:1 , 4:3 , 3:2 , 16:9 If you specify a value other than auto , Elastic Transcoder disables automatic detection of the aspect ratio.

jiTimeSpan :: Lens' JobInput (Maybe TimeSpan) Source #

Settings for clipping an input. Each input can have different clip settings.

jiEncryption :: Lens' JobInput (Maybe Encryption) Source #

The encryption settings, if any, that are used for decrypting your input files. If your input file is encrypted, you must specify the mode that Elastic Transcoder uses to decrypt your file.

jiKey :: Lens' JobInput (Maybe Text) Source #

The name of the file to transcode. Elsewhere in the body of the JSON block is the ID of the pipeline to use for processing the job. The InputBucket object in that pipeline tells Elastic Transcoder which Amazon S3 bucket to get the file from. If the file name includes a prefix, such as cooking/lasagna.mpg , include the prefix in the key. If the file isn't in the specified bucket, Elastic Transcoder returns an error.

jiDetectedProperties :: Lens' JobInput (Maybe DetectedProperties) Source #

The detected properties of the input file.

jiContainer :: Lens' JobInput (Maybe Text) Source #

The container type for the input file. If you want Elastic Transcoder to automatically detect the container type of the input file, specify auto . If you want to specify the container type for the input file, enter one of the following values: 3gp , aac , asf , avi , divx , flv , m4a , mkv , mov , mp3 , mp4 , mpeg , mpeg-ps , mpeg-ts , mxf , ogg , vob , wav , webm

jiInterlaced :: Lens' JobInput (Maybe Text) Source #

Whether the input file is interlaced. If you want Elastic Transcoder to automatically detect whether the input file is interlaced, specify auto . If you want to specify whether the input file is interlaced, enter one of the following values: true , false If you specify a value other than auto , Elastic Transcoder disables automatic detection of interlacing.

jiInputCaptions :: Lens' JobInput (Maybe InputCaptions) Source #

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another. All captions must be in UTF-8. Elastic Transcoder supports two types of captions: * Embedded: Embedded captions are included in the same file as the audio and video. Elastic Transcoder supports only one embedded caption per language, to a maximum of 300 embedded captions per file. Valid input values include: CEA-608 (EIA-608 , first non-empty channel only), CEA-708 (EIA-708 , first non-empty channel only), and mov-text Valid outputs include: mov-text Elastic Transcoder supports a maximum of one embedded format per output. * Sidecar: Sidecar captions are kept in a separate metadata file from the audio and video data. Sidecar captions require a player that is capable of understanding the relationship between the video file and the sidecar file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar captions per file. Valid input values include: dfxp (first div element only), ebu-tt , scc , smpt , srt , ttml (first div element only), and webvtt Valid outputs include: dfxp (first div element only), scc , srt , and webvtt . If you want ttml or smpte-tt compatible captions, specify dfxp as your output format. Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does not preserve text formatting (for example, italics) during the transcoding process. To remove captions or leave the captions empty, set Captions to null. To pass through existing captions unchanged, set the MergePolicy to MergeRetain , and pass in a null CaptionSources array. For more information on embedded files, see the Subtitles Wikipedia page. For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file Wikipedia pages.

JobOutput

data JobOutput Source #

Important: The Outputs object is recommended instead.

If you specified one output for a job, information about that output. If you specified multiple outputs for a job, the Output object lists information about the first output. This duplicates the information that is listed for the first output in the Outputs object.

See: jobOutput smart constructor.

Instances

Eq JobOutput Source # 
Data JobOutput Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> JobOutput -> c JobOutput #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c JobOutput #

toConstr :: JobOutput -> Constr #

dataTypeOf :: JobOutput -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c JobOutput) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c JobOutput) #

gmapT :: (forall b. Data b => b -> b) -> JobOutput -> JobOutput #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> JobOutput -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> JobOutput -> r #

gmapQ :: (forall d. Data d => d -> u) -> JobOutput -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> JobOutput -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> JobOutput -> m JobOutput #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> JobOutput -> m JobOutput #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> JobOutput -> m JobOutput #

Read JobOutput Source # 
Show JobOutput Source # 
Generic JobOutput Source # 

Associated Types

type Rep JobOutput :: * -> * #

Hashable JobOutput Source # 
FromJSON JobOutput Source # 
NFData JobOutput Source # 

Methods

rnf :: JobOutput -> () #

type Rep JobOutput Source # 
type Rep JobOutput = D1 * (MetaData "JobOutput" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "JobOutput'" PrefixI True) ((:*:) * ((:*:) * ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_joAppliedColorSpaceConversion") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_joThumbnailPattern") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text)))) ((:*:) * (S1 * (MetaSel (Just Symbol "_joStatus") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) ((:*:) * (S1 * (MetaSel (Just Symbol "_joHeight") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Int))) (S1 * (MetaSel (Just Symbol "_joFrameRate") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text)))))) ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_joCaptions") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Captions))) (S1 * (MetaSel (Just Symbol "_joPresetId") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text)))) ((:*:) * (S1 * (MetaSel (Just Symbol "_joComposition") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe [Clip]))) ((:*:) * (S1 * (MetaSel (Just Symbol "_joAlbumArt") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe JobAlbumArt))) (S1 * (MetaSel (Just Symbol "_joFileSize") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Integer))))))) ((:*:) * ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_joWatermarks") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe [JobWatermark]))) (S1 * (MetaSel (Just Symbol "_joWidth") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Int)))) ((:*:) * (S1 * (MetaSel (Just Symbol "_joEncryption") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Encryption))) ((:*:) * (S1 * (MetaSel (Just Symbol "_joKey") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_joStatusDetail") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text)))))) ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_joId") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) ((:*:) * (S1 * (MetaSel (Just Symbol "_joSegmentDuration") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_joDurationMillis") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Integer))))) ((:*:) * (S1 * (MetaSel (Just Symbol "_joThumbnailEncryption") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Encryption))) ((:*:) * (S1 * (MetaSel (Just Symbol "_joDuration") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Integer))) (S1 * (MetaSel (Just Symbol "_joRotate") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text)))))))))

jobOutput :: JobOutput Source #

Creates a value of JobOutput with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • joAppliedColorSpaceConversion - If Elastic Transcoder used a preset with a ColorSpaceConversionMode to transcode the output file, the AppliedColorSpaceConversion parameter shows the conversion used. If no ColorSpaceConversionMode was defined in the preset, this parameter is not included in the job response.
  • joThumbnailPattern - Whether you want Elastic Transcoder to create thumbnails for your videos and, if so, how you want Elastic Transcoder to name the files. If you don't want Elastic Transcoder to create thumbnails, specify "". If you do want Elastic Transcoder to create thumbnails, specify the information that you want to include in the file name for each thumbnail. You can specify the following values in any sequence: * {count} (Required) : If you want to create thumbnails, you must include {count} in the ThumbnailPattern object. Wherever you specify {count} , Elastic Transcoder adds a five-digit sequence number (beginning with 00001 ) to thumbnail file names. The number indicates where a given thumbnail appears in the sequence of thumbnails for a transcoded file. Important: If you specify a literal value and/or {resolution} but you omit {count} , Elastic Transcoder returns a validation error and does not create the job. * Literal values (Optional) : You can specify literal values anywhere in the ThumbnailPattern object. For example, you can include them as a file name prefix or as a delimiter between {resolution} and {count} . * {resolution} (Optional) : If you want Elastic Transcoder to include the resolution in the file name, include {resolution} in the ThumbnailPattern object. When creating thumbnails, Elastic Transcoder automatically saves the files in the format (.jpg or .png) that appears in the preset that you specified in the PresetID value of CreateJobOutput . Elastic Transcoder also appends the applicable file name extension.
  • joStatus - The status of one output in a job. If you specified only one output for the job, Outputs:Status is always the same as Job:Status . If you specified more than one output: * Job:Status and Outputs:Status for all of the outputs is Submitted until Elastic Transcoder starts to process the first output. * When Elastic Transcoder starts to process the first output, Outputs:Status for that output and Job:Status both change to Progressing. For each output, the value of Outputs:Status remains Submitted until Elastic Transcoder starts to process the output. * Job:Status remains Progressing until all of the outputs reach a terminal status, either Complete or Error. * When all of the outputs reach a terminal status, Job:Status changes to Complete only if Outputs:Status for all of the outputs is Complete . If Outputs:Status for one or more outputs is Error , the terminal status for Job:Status is also Error . The value of Status is one of the following: Submitted , Progressing , Complete , Canceled , or Error .
  • joHeight - Height of the output file, in pixels.
  • joFrameRate - Frame rate of the output file, in frames per second.
  • joCaptions - You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another. All captions must be in UTF-8. Elastic Transcoder supports two types of captions: * Embedded: Embedded captions are included in the same file as the audio and video. Elastic Transcoder supports only one embedded caption per language, to a maximum of 300 embedded captions per file. Valid input values include: CEA-608 (EIA-608 , first non-empty channel only), CEA-708 (EIA-708 , first non-empty channel only), and mov-text Valid outputs include: mov-text Elastic Transcoder supports a maximum of one embedded format per output. * Sidecar: Sidecar captions are kept in a separate metadata file from the audio and video data. Sidecar captions require a player that is capable of understanding the relationship between the video file and the sidecar file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar captions per file. Valid input values include: dfxp (first div element only), ebu-tt , scc , smpt , srt , ttml (first div element only), and webvtt Valid outputs include: dfxp (first div element only), scc , srt , and webvtt . If you want ttml or smpte-tt compatible captions, specify dfxp as your output format. Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does not preserve text formatting (for example, italics) during the transcoding process. To remove captions or leave the captions empty, set Captions to null. To pass through existing captions unchanged, set the MergePolicy to MergeRetain , and pass in a null CaptionSources array. For more information on embedded files, see the Subtitles Wikipedia page. For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file Wikipedia pages.
  • joPresetId - The value of the Id object for the preset that you want to use for this job. The preset determines the audio, video, and thumbnail settings that Elastic Transcoder uses for transcoding. To use a preset that you created, specify the preset ID that Elastic Transcoder returned in the response when you created the preset. You can also use the Elastic Transcoder system presets, which you can get with ListPresets .
  • joComposition - You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.
  • joAlbumArt - The album art to be associated with the output file, if any.
  • joFileSize - File size of the output file, in bytes.
  • joWatermarks - Information about the watermarks that you want Elastic Transcoder to add to the video during transcoding. You can specify up to four watermarks for each output. Settings for each watermark must be defined in the preset that you specify in Preset for the current output. Watermarks are added to the output video in the sequence in which you list them in the job output—the first watermark in the list is added to the output video first, the second watermark in the list is added next, and so on. As a result, if the settings in a preset cause Elastic Transcoder to place all watermarks in the same location, the second watermark that you add covers the first one, the third one covers the second, and the fourth one covers the third.
  • joWidth - Specifies the width of the output file in pixels.
  • joEncryption - The encryption settings, if any, that you want Elastic Transcoder to apply to your output files. If you choose to use encryption, you must specify a mode to use. If you choose not to use encryption, Elastic Transcoder writes an unencrypted file to your Amazon S3 bucket.
  • joKey - The name to assign to the transcoded file. Elastic Transcoder saves the file in the Amazon S3 bucket specified by the OutputBucket object in the pipeline that is specified by the pipeline ID.
  • joStatusDetail - Information that further explains Status .
  • joId - A sequential counter, starting with 1, that identifies an output among the outputs from the current job. In the Output syntax, this value is always 1.
  • joSegmentDuration - Important: (Outputs in Fragmented MP4 or MPEG-TS format only.) If you specify a preset in PresetId for which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), SegmentDuration is the target maximum duration of each segment in seconds. For HLSv3 format playlists, each media segment is stored in a separate .ts file. For HLSv4 , MPEG-DASH , and Smooth playlists, all media segments for an output are stored in a single file. Each segment is approximately the length of the SegmentDuration , though individual segments might be shorter or longer. The range of valid values is 1 to 60 seconds. If the duration of the video is not evenly divisible by SegmentDuration , the duration of the last segment is the remainder of total length/SegmentDuration. Elastic Transcoder creates an output-specific playlist for each HLS output that you specify in OutputKeys. To add an output to the master playlist for this job, include it in the OutputKeys of the associated playlist.
  • joDurationMillis - Duration of the output file, in milliseconds.
  • joThumbnailEncryption - The encryption settings, if any, that you want Elastic Transcoder to apply to your thumbnail.
  • joDuration - Duration of the output file, in seconds.
  • joRotate - The number of degrees clockwise by which you want Elastic Transcoder to rotate the output relative to the input. Enter one of the following values: auto , 0 , 90 , 180 , 270 The value auto generally works only if the file that you're transcoding contains rotation metadata.
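
JobOutput only has a FromJSON instance, so in practice you read it from a job description rather than construct it. The sketch below assumes you already have a JobOutput value (for example, taken from a ReadJob response) and summarises it with the lenses documented below.

{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((^.))
import Data.Maybe (fromMaybe)
import qualified Data.Text as Text
import Network.AWS.ElasticTranscoder.Types

-- Render "key status" for one output, falling back to placeholders
-- when the response omits a field.
outputSummary :: JobOutput -> Text.Text
outputSummary o = Text.unwords
  [ fromMaybe "<no key>"    (o ^. joKey)
  , fromMaybe "<no status>" (o ^. joStatus)
  ]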

joAppliedColorSpaceConversion :: Lens' JobOutput (Maybe Text) Source #

If Elastic Transcoder used a preset with a ColorSpaceConversionMode to transcode the output file, the AppliedColorSpaceConversion parameter shows the conversion used. If no ColorSpaceConversionMode was defined in the preset, this parameter is not included in the job response.

joThumbnailPattern :: Lens' JobOutput (Maybe Text) Source #

Whether you want Elastic Transcoder to create thumbnails for your videos and, if so, how you want Elastic Transcoder to name the files. If you don't want Elastic Transcoder to create thumbnails, specify "". If you do want Elastic Transcoder to create thumbnails, specify the information that you want to include in the file name for each thumbnail. You can specify the following values in any sequence: * {count} (Required) : If you want to create thumbnails, you must include {count} in the ThumbnailPattern object. Wherever you specify {count} , Elastic Transcoder adds a five-digit sequence number (beginning with 00001 ) to thumbnail file names. The number indicates where a given thumbnail appears in the sequence of thumbnails for a transcoded file. Important: If you specify a literal value and/or {resolution} but you omit {count} , Elastic Transcoder returns a validation error and does not create the job. * Literal values (Optional) : You can specify literal values anywhere in the ThumbnailPattern object. For example, you can include them as a file name prefix or as a delimiter between {resolution} and {count} . * {resolution} (Optional) : If you want Elastic Transcoder to include the resolution in the file name, include {resolution} in the ThumbnailPattern object. When creating thumbnails, Elastic Transcoder automatically saves the files in the format (.jpg or .png) that appears in the preset that you specified in the PresetID value of CreateJobOutput . Elastic Transcoder also appends the applicable file name extension.

joStatus :: Lens' JobOutput (Maybe Text) Source #

The status of one output in a job. If you specified only one output for the job, Outputs:Status is always the same as Job:Status . If you specified more than one output: * Job:Status and Outputs:Status for all of the outputs is Submitted until Elastic Transcoder starts to process the first output. * When Elastic Transcoder starts to process the first output, Outputs:Status for that output and Job:Status both change to Progressing. For each output, the value of Outputs:Status remains Submitted until Elastic Transcoder starts to process the output. * Job:Status remains Progressing until all of the outputs reach a terminal status, either Complete or Error. * When all of the outputs reach a terminal status, Job:Status changes to Complete only if Outputs:Status for all of the outputs is Complete . If Outputs:Status for one or more outputs is Error , the terminal status for Job:Status is also Error . The value of Status is one of the following: Submitted , Progressing , Complete , Canceled , or Error .

joHeight :: Lens' JobOutput (Maybe Int) Source #

Height of the output file, in pixels.

joFrameRate :: Lens' JobOutput (Maybe Text) Source #

Frame rate of the output file, in frames per second.

joCaptions :: Lens' JobOutput (Maybe Captions) Source #

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another. All captions must be in UTF-8. Elastic Transcoder supports two types of captions: * Embedded: Embedded captions are included in the same file as the audio and video. Elastic Transcoder supports only one embedded caption per language, to a maximum of 300 embedded captions per file. Valid input values include: CEA-608 (EIA-608 , first non-empty channel only), CEA-708 (EIA-708 , first non-empty channel only), and mov-text Valid outputs include: mov-text Elastic Transcoder supports a maximum of one embedded format per output. * Sidecar: Sidecar captions are kept in a separate metadata file from the audio and video data. Sidecar captions require a player that is capable of understanding the relationship between the video file and the sidecar file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar captions per file. Valid input values include: dfxp (first div element only), ebu-tt , scc , smpt , srt , ttml (first div element only), and webvtt Valid outputs include: dfxp (first div element only), scc , srt , and webvtt . If you want ttml or smpte-tt compatible captions, specify dfxp as your output format. Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does not preserve text formatting (for example, italics) during the transcoding process. To remove captions or leave the captions empty, set Captions to null. To pass through existing captions unchanged, set the MergePolicy to MergeRetain , and pass in a null CaptionSources array. For more information on embedded files, see the Subtitles Wikipedia page. For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file Wikipedia pages.

joPresetId :: Lens' JobOutput (Maybe Text) Source #

The value of the Id object for the preset that you want to use for this job. The preset determines the audio, video, and thumbnail settings that Elastic Transcoder uses for transcoding. To use a preset that you created, specify the preset ID that Elastic Transcoder returned in the response when you created the preset. You can also use the Elastic Transcoder system presets, which you can get with ListPresets .

joComposition :: Lens' JobOutput [Clip] Source #

You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

joAlbumArt :: Lens' JobOutput (Maybe JobAlbumArt) Source #

The album art to be associated with the output file, if any.

joFileSize :: Lens' JobOutput (Maybe Integer) Source #

File size of the output file, in bytes.

joWatermarks :: Lens' JobOutput [JobWatermark] Source #

Information about the watermarks that you want Elastic Transcoder to add to the video during transcoding. You can specify up to four watermarks for each output. Settings for each watermark must be defined in the preset that you specify in Preset for the current output. Watermarks are added to the output video in the sequence in which you list them in the job output—the first watermark in the list is added to the output video first, the second watermark in the list is added next, and so on. As a result, if the settings in a preset cause Elastic Transcoder to place all watermarks in the same location, the second watermark that you add covers the first one, the third one covers the second, and the fourth one covers the third.

joWidth :: Lens' JobOutput (Maybe Int) Source #

Specifies the width of the output file in pixels.

joEncryption :: Lens' JobOutput (Maybe Encryption) Source #

The encryption settings, if any, that you want Elastic Transcoder to apply to your output files. If you choose to use encryption, you must specify a mode to use. If you choose not to use encryption, Elastic Transcoder writes an unencrypted file to your Amazon S3 bucket.

joKey :: Lens' JobOutput (Maybe Text) Source #

The name to assign to the transcoded file. Elastic Transcoder saves the file in the Amazon S3 bucket specified by the OutputBucket object in the pipeline that is specified by the pipeline ID.

joStatusDetail :: Lens' JobOutput (Maybe Text) Source #

Information that further explains Status .

joId :: Lens' JobOutput (Maybe Text) Source #

A sequential counter, starting with 1, that identifies an output among the outputs from the current job. In the Output syntax, this value is always 1.

joSegmentDuration :: Lens' JobOutput (Maybe Text) Source #

Important: (Outputs in Fragmented MP4 or MPEG-TS format only.) If you specify a preset in PresetId for which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), SegmentDuration is the target maximum duration of each segment in seconds. For HLSv3 format playlists, each media segment is stored in a separate .ts file. For HLSv4 , MPEG-DASH , and Smooth playlists, all media segments for an output are stored in a single file. Each segment is approximately the length of the SegmentDuration , though individual segments might be shorter or longer. The range of valid values is 1 to 60 seconds. If the duration of the video is not evenly divisible by SegmentDuration , the duration of the last segment is the remainder of total length/SegmentDuration. Elastic Transcoder creates an output-specific playlist for each HLS output that you specify in OutputKeys. To add an output to the master playlist for this job, include it in the OutputKeys of the associated playlist.

joDurationMillis :: Lens' JobOutput (Maybe Integer) Source #

Duration of the output file, in milliseconds.

joThumbnailEncryption :: Lens' JobOutput (Maybe Encryption) Source #

The encryption settings, if any, that you want Elastic Transcoder to apply to your thumbnail.

joDuration :: Lens' JobOutput (Maybe Integer) Source #

Duration of the output file, in seconds.

joRotate :: Lens' JobOutput (Maybe Text) Source #

The number of degrees clockwise by which you want Elastic Transcoder to rotate the output relative to the input. Enter one of the following values: auto , 0 , 90 , 180 , 270 The value auto generally works only if the file that you're transcoding contains rotation metadata.

JobWatermark

data JobWatermark Source #

Watermarks can be in .png or .jpg format. If you want to display a watermark that is not rectangular, use the .png format, which supports transparency.

See: jobWatermark smart constructor.

Instances

Eq JobWatermark Source # 
Data JobWatermark Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> JobWatermark -> c JobWatermark #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c JobWatermark #

toConstr :: JobWatermark -> Constr #

dataTypeOf :: JobWatermark -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c JobWatermark) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c JobWatermark) #

gmapT :: (forall b. Data b => b -> b) -> JobWatermark -> JobWatermark #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> JobWatermark -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> JobWatermark -> r #

gmapQ :: (forall d. Data d => d -> u) -> JobWatermark -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> JobWatermark -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> JobWatermark -> m JobWatermark #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> JobWatermark -> m JobWatermark #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> JobWatermark -> m JobWatermark #

Read JobWatermark Source # 
Show JobWatermark Source # 
Generic JobWatermark Source # 

Associated Types

type Rep JobWatermark :: * -> * #

Hashable JobWatermark Source # 
ToJSON JobWatermark Source # 
FromJSON JobWatermark Source # 
NFData JobWatermark Source # 

Methods

rnf :: JobWatermark -> () #

type Rep JobWatermark Source # 
type Rep JobWatermark = D1 * (MetaData "JobWatermark" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "JobWatermark'" PrefixI True) ((:*:) * (S1 * (MetaSel (Just Symbol "_jwPresetWatermarkId") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) ((:*:) * (S1 * (MetaSel (Just Symbol "_jwInputKey") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_jwEncryption") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Encryption))))))

jobWatermark :: JobWatermark Source #

Creates a value of JobWatermark with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • jwPresetWatermarkId - The ID of the watermark settings that Elastic Transcoder uses to add watermarks to the video during transcoding. The settings are in the preset specified by Preset for the current output. In that preset, the value of Watermarks Id tells Elastic Transcoder which settings to use.
  • jwInputKey - The name of the .png or .jpg file that you want to use for the watermark. To determine which Amazon S3 bucket contains the specified file, Elastic Transcoder checks the pipeline specified by Pipeline ; the Input Bucket object in that pipeline identifies the bucket. If the file name includes a prefix, for example, logos/128x64.png , include the prefix in the key. If the file isn't in the specified bucket, Elastic Transcoder returns an error.
  • jwEncryption - The encryption settings, if any, that you want Elastic Transcoder to apply to your watermarks.
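
For instance, a watermark that reuses a preset's watermark settings can be built with the jobWatermark smart constructor and the lenses above; the watermark ID and logo key below are placeholders, so treat this as a minimal sketch.

{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (?~))
import Network.AWS.ElasticTranscoder.Types

-- Use the "BottomRight" watermark settings from the output's preset and
-- overlay logos/128x64.png from the pipeline's input bucket.
logoWatermark :: JobWatermark
logoWatermark = jobWatermark
  & jwPresetWatermarkId ?~ "BottomRight"
  & jwInputKey          ?~ "logos/128x64.png"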

jwPresetWatermarkId :: Lens' JobWatermark (Maybe Text) Source #

The ID of the watermark settings that Elastic Transcoder uses to add watermarks to the video during transcoding. The settings are in the preset specified by Preset for the current output. In that preset, the value of Watermarks Id tells Elastic Transcoder which settings to use.

jwInputKey :: Lens' JobWatermark (Maybe Text) Source #

The name of the .png or .jpg file that you want to use for the watermark. To determine which Amazon S3 bucket contains the specified file, Elastic Transcoder checks the pipeline specified by Pipeline ; the Input Bucket object in that pipeline identifies the bucket. If the file name includes a prefix, for example, logos/128x64.png , include the prefix in the key. If the file isn't in the specified bucket, Elastic Transcoder returns an error.

jwEncryption :: Lens' JobWatermark (Maybe Encryption) Source #

The encryption settings, if any, that you want Elastic Transcoder to apply to your watermarks.

Notifications

data Notifications Source #

The Amazon Simple Notification Service (Amazon SNS) topic or topics to notify in order to report job status.

Important: To receive notifications, you must also subscribe to the new topic in the Amazon SNS console.

See: notifications smart constructor.

Instances

Eq Notifications Source # 
Data Notifications Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> Notifications -> c Notifications #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c Notifications #

toConstr :: Notifications -> Constr #

dataTypeOf :: Notifications -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c Notifications) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c Notifications) #

gmapT :: (forall b. Data b => b -> b) -> Notifications -> Notifications #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> Notifications -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> Notifications -> r #

gmapQ :: (forall d. Data d => d -> u) -> Notifications -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> Notifications -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> Notifications -> m Notifications #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> Notifications -> m Notifications #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> Notifications -> m Notifications #

Read Notifications Source # 
Show Notifications Source # 
Generic Notifications Source # 

Associated Types

type Rep Notifications :: * -> * #

Hashable Notifications Source # 
ToJSON Notifications Source # 
FromJSON Notifications Source # 
NFData Notifications Source # 

Methods

rnf :: Notifications -> () #

type Rep Notifications Source # 
type Rep Notifications = D1 * (MetaData "Notifications" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "Notifications'" PrefixI True) ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_nError") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_nWarning") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text)))) ((:*:) * (S1 * (MetaSel (Just Symbol "_nProgressing") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_nCompleted") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))))))

notifications :: Notifications Source #

Creates a value of Notifications with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • nError - The Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition.
  • nWarning - The Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition.
  • nProgressing - The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process the job.
  • nCompleted - The Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing the job.
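
A Notifications value typically points each event at an Amazon SNS topic; the ARNs below are placeholders, so this is a minimal sketch built from the notifications smart constructor and the lenses above.

{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (?~))
import Network.AWS.ElasticTranscoder.Types

-- Notify one topic per event; any lens you leave unset stays Nothing.
jobNotifications :: Notifications
jobNotifications = notifications
  & nProgressing ?~ "arn:aws:sns:us-east-1:111122223333:ets-progressing"
  & nCompleted   ?~ "arn:aws:sns:us-east-1:111122223333:ets-completed"
  & nWarning     ?~ "arn:aws:sns:us-east-1:111122223333:ets-warning"
  & nError       ?~ "arn:aws:sns:us-east-1:111122223333:ets-error"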

nError :: Lens' Notifications (Maybe Text) Source #

The Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition.

nWarning :: Lens' Notifications (Maybe Text) Source #

The Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition.

nProgressing :: Lens' Notifications (Maybe Text) Source #

The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process the job.

nCompleted :: Lens' Notifications (Maybe Text) Source #

The Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing the job.

Permission

data Permission Source #

The Permission structure.

See: permission smart constructor.

Instances

Eq Permission Source # 
Data Permission Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> Permission -> c Permission #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c Permission #

toConstr :: Permission -> Constr #

dataTypeOf :: Permission -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c Permission) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c Permission) #

gmapT :: (forall b. Data b => b -> b) -> Permission -> Permission #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> Permission -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> Permission -> r #

gmapQ :: (forall d. Data d => d -> u) -> Permission -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> Permission -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> Permission -> m Permission #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> Permission -> m Permission #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> Permission -> m Permission #

Read Permission Source # 
Show Permission Source # 
Generic Permission Source # 

Associated Types

type Rep Permission :: * -> * #

Hashable Permission Source # 
ToJSON Permission Source # 
FromJSON Permission Source # 
NFData Permission Source # 

Methods

rnf :: Permission -> () #

type Rep Permission Source # 
type Rep Permission = D1 * (MetaData "Permission" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "Permission'" PrefixI True) ((:*:) * (S1 * (MetaSel (Just Symbol "_pAccess") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe [Text]))) ((:*:) * (S1 * (MetaSel (Just Symbol "_pGranteeType") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_pGrantee") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))))))

permission :: Permission Source #

Creates a value of Permission with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • pAccess - The permission that you want to give to the AWS user that is listed in Grantee. Valid values include: * READ : The grantee can read the thumbnails and metadata for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. * READ_ACP : The grantee can read the object ACL for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. * WRITE_ACP : The grantee can write the ACL for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. * FULL_CONTROL : The grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.
  • pGranteeType - The type of value that appears in the Grantee object: * Canonical : Either the canonical user ID for an AWS account or an origin access identity for an Amazon CloudFront distribution. Important: A canonical user ID is not the same as an AWS account number. * Email : The registered email address of an AWS account. * Group : One of the following predefined Amazon S3 groups: AllUsers , AuthenticatedUsers , or LogDelivery .
  • pGrantee - The AWS user or group that you want to have access to transcoded files and playlists. To identify the user or group, you can specify the canonical user ID for an AWS account, an origin access identity for a CloudFront distribution, the registered email address of an AWS account, or a predefined Amazon S3 group.
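
For example, granting read access to the predefined AuthenticatedUsers group might look like the sketch below; the grantee type, grantee, and access values simply echo the valid values listed above.

{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (.~), (?~))
import Network.AWS.ElasticTranscoder.Types

-- READ access for the predefined AuthenticatedUsers group.
readOnlyGrant :: Permission
readOnlyGrant = permission
  & pGranteeType ?~ "Group"
  & pGrantee     ?~ "AuthenticatedUsers"
  & pAccess      .~ ["READ"]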

pAccess :: Lens' Permission [Text] Source #

The permission that you want to give to the AWS user that is listed in Grantee. Valid values include: * READ : The grantee can read the thumbnails and metadata for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. * READ_ACP : The grantee can read the object ACL for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. * WRITE_ACP : The grantee can write the ACL for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. * FULL_CONTROL : The grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket.

pGranteeType :: Lens' Permission (Maybe Text) Source #

The type of value that appears in the Grantee object: * Canonical : Either the canonical user ID for an AWS account or an origin access identity for an Amazon CloudFront distribution. Important: A canonical user ID is not the same as an AWS account number. * Email : The registered email address of an AWS account. * Group : One of the following predefined Amazon S3 groups: AllUsers , AuthenticatedUsers , or LogDelivery .

pGrantee :: Lens' Permission (Maybe Text) Source #

The AWS user or group that you want to have access to transcoded files and playlists. To identify the user or group, you can specify the canonical user ID for an AWS account, an origin access identity for a CloudFront distribution, the registered email address of an AWS account, or a predefined Amazon S3 group.

Pipeline

data Pipeline Source #

The pipeline (queue) that is used to manage jobs.

See: pipeline smart constructor.

Instances

Eq Pipeline Source # 
Data Pipeline Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> Pipeline -> c Pipeline #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c Pipeline #

toConstr :: Pipeline -> Constr #

dataTypeOf :: Pipeline -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c Pipeline) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c Pipeline) #

gmapT :: (forall b. Data b => b -> b) -> Pipeline -> Pipeline #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> Pipeline -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> Pipeline -> r #

gmapQ :: (forall d. Data d => d -> u) -> Pipeline -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> Pipeline -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> Pipeline -> m Pipeline #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> Pipeline -> m Pipeline #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> Pipeline -> m Pipeline #

Read Pipeline Source # 
Show Pipeline Source # 
Generic Pipeline Source # 

Associated Types

type Rep Pipeline :: * -> * #

Methods

from :: Pipeline -> Rep Pipeline x #

to :: Rep Pipeline x -> Pipeline #

Hashable Pipeline Source # 

Methods

hashWithSalt :: Int -> Pipeline -> Int #

hash :: Pipeline -> Int #

FromJSON Pipeline Source # 
NFData Pipeline Source # 

Methods

rnf :: Pipeline -> () #

type Rep Pipeline Source # 
type Rep Pipeline = D1 * (MetaData "Pipeline" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "Pipeline'" PrefixI True) ((:*:) * ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_pipStatus") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_pipARN") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text)))) ((:*:) * (S1 * (MetaSel (Just Symbol "_pipInputBucket") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) ((:*:) * (S1 * (MetaSel (Just Symbol "_pipContentConfig") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe PipelineOutputConfig))) (S1 * (MetaSel (Just Symbol "_pipOutputBucket") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text)))))) ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_pipRole") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) ((:*:) * (S1 * (MetaSel (Just Symbol "_pipName") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_pipAWSKMSKeyARN") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))))) ((:*:) * (S1 * (MetaSel (Just Symbol "_pipId") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) ((:*:) * (S1 * (MetaSel (Just Symbol "_pipNotifications") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Notifications))) (S1 * (MetaSel (Just Symbol "_pipThumbnailConfig") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe PipelineOutputConfig))))))))

pipeline :: Pipeline Source #

Creates a value of Pipeline with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • pipStatus - The current status of the pipeline: * Active : The pipeline is processing jobs. * Paused : The pipeline is not currently processing jobs.
  • pipARN - The Amazon Resource Name (ARN) for the pipeline.
  • pipInputBucket - The Amazon S3 bucket from which Elastic Transcoder gets media files for transcoding and the graphics files, if any, that you want to use for watermarks.
  • pipContentConfig - Information about the Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. Either you specify both ContentConfig and ThumbnailConfig , or you specify OutputBucket . * Bucket : The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. * Permissions : A list of the users and/or predefined Amazon S3 groups you want to have access to transcoded files and playlists, and the type of access that you want them to have. * GranteeType: The type of value that appears in the Grantee object: * Canonical : Either the canonical user ID for an AWS account or an origin access identity for an Amazon CloudFront distribution. * Email : The registered email address of an AWS account. * Group : One of the following predefined Amazon S3 groups: AllUsers , AuthenticatedUsers , or LogDelivery . * Grantee : The AWS user or group that you want to have access to transcoded files and playlists. * Access : The permission that you want to give to the AWS user that is listed in Grantee . Valid values include: * READ : The grantee can read the objects and metadata for objects that Elastic Transcoder adds to the Amazon S3 bucket. * READ_ACP : The grantee can read the object ACL for objects that Elastic Transcoder adds to the Amazon S3 bucket. * WRITE_ACP : The grantee can write the ACL for the objects that Elastic Transcoder adds to the Amazon S3 bucket. * FULL_CONTROL : The grantee has READ , READ_ACP , and WRITE_ACP permissions for the objects that Elastic Transcoder adds to the Amazon S3 bucket. * StorageClass : The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the video files and playlists that it stores in your Amazon S3 bucket.
  • pipOutputBucket - The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files, thumbnails, and playlists. Either you specify this value, or you specify both ContentConfig and ThumbnailConfig .
  • pipRole - The IAM Amazon Resource Name (ARN) for the role that Elastic Transcoder uses to transcode jobs for this pipeline.
  • pipName - The name of the pipeline. We recommend that the name be unique within the AWS account, but uniqueness is not enforced. Constraints: Maximum 40 characters
  • pipAWSKMSKeyARN - The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline. If you use either S3 or S3-AWS-KMS as your Encryption:Mode , you don't need to provide a key with your job because a default key, known as an AWS-KMS key, is created for you automatically. You need to provide an AWS-KMS key only if you want to use a non-default AWS-KMS key, or if you are using an Encryption:Mode of AES-PKCS7 , AES-CTR , or AES-GCM .
  • pipId - The identifier for the pipeline. You use this value to identify the pipeline in which you want to perform a variety of operations, such as creating a job or a preset.
  • pipNotifications - The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status. Important: To receive notifications, you must also subscribe to the new topic in the Amazon SNS console. * Progressing (optional): The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process the job. * Completed (optional): The Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing the job. * Warning (optional): The Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition. * Error (optional): The Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition.
  • pipThumbnailConfig - Information about the Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files. Either you specify both ContentConfig and ThumbnailConfig , or you specify OutputBucket . * Bucket : The Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files. * Permissions : A list of the users and/or predefined Amazon S3 groups you want to have access to thumbnail files, and the type of access that you want them to have. * GranteeType: The type of value that appears in the Grantee object: * Canonical : Either the canonical user ID for an AWS account or an origin access identity for an Amazon CloudFront distribution. Important: A canonical user ID is not the same as an AWS account number. * Email : The registered email address of an AWS account. * Group : One of the following predefined Amazon S3 groups: AllUsers , AuthenticatedUsers , or LogDelivery . * Grantee : The AWS user or group that you want to have access to thumbnail files. * Access: The permission that you want to give to the AWS user that is listed in Grantee. Valid values include: * READ : The grantee can read the thumbnails and metadata for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. * READ_ACP : The grantee can read the object ACL for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. * WRITE_ACP : The grantee can write the ACL for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. * FULL_CONTROL : The grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. * StorageClass : The Amazon S3 storage class, Standard or ReducedRedundancy , that you want Elastic Transcoder to assign to the thumbnails that it stores in your Amazon S3 bucket.
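
Pipeline only has a FromJSON instance, so you normally receive it in a response (for example, from ReadPipeline or ListPipelines) and inspect it with the lenses documented below; a minimal sketch:

{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((^.))
import Network.AWS.ElasticTranscoder.Types

-- True when the pipeline reports that it is currently processing jobs.
isActive :: Pipeline -> Bool
isActive p = p ^. pipStatus == Just "Active"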

pipStatus :: Lens' Pipeline (Maybe Text) Source #

The current status of the pipeline: * Active : The pipeline is processing jobs. * Paused : The pipeline is not currently processing jobs.

pipARN :: Lens' Pipeline (Maybe Text) Source #

The Amazon Resource Name (ARN) for the pipeline.

pipInputBucket :: Lens' Pipeline (Maybe Text) Source #

The Amazon S3 bucket from which Elastic Transcoder gets media files for transcoding and the graphics files, if any, that you want to use for watermarks.

pipContentConfig :: Lens' Pipeline (Maybe PipelineOutputConfig) Source #

Information about the Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. Either you specify both ContentConfig and ThumbnailConfig , or you specify OutputBucket . * Bucket : The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. * Permissions : A list of the users and/or predefined Amazon S3 groups you want to have access to transcoded files and playlists, and the type of access that you want them to have. * GranteeType: The type of value that appears in the Grantee object: * Canonical : Either the canonical user ID for an AWS account or an origin access identity for an Amazon CloudFront distribution. * Email : The registered email address of an AWS account. * Group : One of the following predefined Amazon S3 groups: AllUsers , AuthenticatedUsers , or LogDelivery . * Grantee : The AWS user or group that you want to have access to transcoded files and playlists. * Access : The permission that you want to give to the AWS user that is listed in Grantee . Valid values include: * READ : The grantee can read the objects and metadata for objects that Elastic Transcoder adds to the Amazon S3 bucket. * READ_ACP : The grantee can read the object ACL for objects that Elastic Transcoder adds to the Amazon S3 bucket. * WRITE_ACP : The grantee can write the ACL for the objects that Elastic Transcoder adds to the Amazon S3 bucket. * FULL_CONTROL : The grantee has READ , READ_ACP , and WRITE_ACP permissions for the objects that Elastic Transcoder adds to the Amazon S3 bucket. * StorageClass : The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the video files and playlists that it stores in your Amazon S3 bucket.

pipOutputBucket :: Lens' Pipeline (Maybe Text) Source #

The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files, thumbnails, and playlists. Either you specify this value, or you specify both ContentConfig and ThumbnailConfig .

pipRole :: Lens' Pipeline (Maybe Text) Source #

The IAM Amazon Resource Name (ARN) for the role that Elastic Transcoder uses to transcode jobs for this pipeline.

pipName :: Lens' Pipeline (Maybe Text) Source #

The name of the pipeline. We recommend that the name be unique within the AWS account, but uniqueness is not enforced. Constraints: Maximum 40 characters

pipAWSKMSKeyARN :: Lens' Pipeline (Maybe Text) Source #

The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline. If you use either S3 or S3-AWS-KMS as your Encryption:Mode , you don't need to provide a key with your job because a default key, known as an AWS-KMS key, is created for you automatically. You need to provide an AWS-KMS key only if you want to use a non-default AWS-KMS key, or if you are using an Encryption:Mode of AES-PKCS7 , AES-CTR , or AES-GCM .

pipId :: Lens' Pipeline (Maybe Text) Source #

The identifier for the pipeline. You use this value to identify the pipeline in which you want to perform a variety of operations, such as creating a job or a preset.

pipNotifications :: Lens' Pipeline (Maybe Notifications) Source #

The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status. Important: To receive notifications, you must also subscribe to the new topic in the Amazon SNS console. * Progressing (optional): The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process the job. * Completed (optional): The Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing the job. * Warning (optional): The Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition. * Error (optional): The Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition.

pipThumbnailConfig :: Lens' Pipeline (Maybe PipelineOutputConfig) Source #

Information about the Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files. Either you specify both ContentConfig and ThumbnailConfig , or you specify OutputBucket . * Bucket : The Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files. * Permissions : A list of the users and/or predefined Amazon S3 groups you want to have access to thumbnail files, and the type of access that you want them to have. * GranteeType: The type of value that appears in the Grantee object: * Canonical : Either the canonical user ID for an AWS account or an origin access identity for an Amazon CloudFront distribution. Important: A canonical user ID is not the same as an AWS account number. * Email : The registered email address of an AWS account. * Group : One of the following predefined Amazon S3 groups: AllUsers , AuthenticatedUsers , or LogDelivery . * Grantee : The AWS user or group that you want to have access to thumbnail files. * Access: The permission that you want to give to the AWS user that is listed in Grantee. Valid values include: * READ : The grantee can read the thumbnails and metadata for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. * READ_ACP : The grantee can read the object ACL for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. * WRITE_ACP : The grantee can write the ACL for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. * FULL_CONTROL : The grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. * StorageClass : The Amazon S3 storage class, Standard or ReducedRedundancy , that you want Elastic Transcoder to assign to the thumbnails that it stores in your Amazon S3 bucket.
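
A minimal usage sketch (not part of the generated reference): reading a few of the fields documented above from a Pipeline value, for example one returned by a ReadPipeline or ListPipelines call. The (^.) operator comes from Control.Lens, and the helper name describePipeline is illustrative only.

import Control.Lens ((^.))
import Network.AWS.ElasticTranscoder.Types

-- Summarize a pipeline using the lenses documented above.
describePipeline :: Pipeline -> String
describePipeline p = unwords
  [ "name:",   show (p ^. pipName)
  , "status:", show (p ^. pipStatus)
  , "input:",  show (p ^. pipInputBucket)
  ]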

PipelineOutputConfig

data PipelineOutputConfig Source #

The PipelineOutputConfig structure.

See: pipelineOutputConfig smart constructor.

Instances

Eq PipelineOutputConfig Source # 
Data PipelineOutputConfig Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> PipelineOutputConfig -> c PipelineOutputConfig #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c PipelineOutputConfig #

toConstr :: PipelineOutputConfig -> Constr #

dataTypeOf :: PipelineOutputConfig -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c PipelineOutputConfig) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c PipelineOutputConfig) #

gmapT :: (forall b. Data b => b -> b) -> PipelineOutputConfig -> PipelineOutputConfig #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> PipelineOutputConfig -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> PipelineOutputConfig -> r #

gmapQ :: (forall d. Data d => d -> u) -> PipelineOutputConfig -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> PipelineOutputConfig -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> PipelineOutputConfig -> m PipelineOutputConfig #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> PipelineOutputConfig -> m PipelineOutputConfig #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> PipelineOutputConfig -> m PipelineOutputConfig #

Read PipelineOutputConfig Source # 
Show PipelineOutputConfig Source # 
Generic PipelineOutputConfig Source # 
Hashable PipelineOutputConfig Source # 
ToJSON PipelineOutputConfig Source # 
FromJSON PipelineOutputConfig Source # 
NFData PipelineOutputConfig Source # 

Methods

rnf :: PipelineOutputConfig -> () #

type Rep PipelineOutputConfig Source # 
type Rep PipelineOutputConfig = D1 * (MetaData "PipelineOutputConfig" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "PipelineOutputConfig'" PrefixI True) ((:*:) * (S1 * (MetaSel (Just Symbol "_pocBucket") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) ((:*:) * (S1 * (MetaSel (Just Symbol "_pocStorageClass") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_pocPermissions") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe [Permission]))))))

pipelineOutputConfig :: PipelineOutputConfig Source #

Creates a value of PipelineOutputConfig with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • pocBucket - The Amazon S3 bucket in which you want Elastic Transcoder to save the transcoded files. Specify this value when all of the following are true: * You want to save transcoded files, thumbnails (if any), and playlists (if any) together in one bucket. * You do not want to specify the users or groups who have access to the transcoded files, thumbnails, and playlists. * You do not want to specify the permissions that Elastic Transcoder grants to the files. * You want to associate the transcoded files and thumbnails with the Amazon S3 Standard storage class. If you want to save transcoded files and playlists in one bucket and thumbnails in another bucket, to specify which users can access the transcoded files or the permissions those users have, or to change the Amazon S3 storage class, omit OutputBucket and specify values for ContentConfig and ThumbnailConfig instead.
  • pocStorageClass - The Amazon S3 storage class, Standard or ReducedRedundancy , that you want Elastic Transcoder to assign to the video files and playlists that it stores in your Amazon S3 bucket.
  • pocPermissions - Optional. The Permissions object specifies which users and/or predefined Amazon S3 groups you want to have access to transcoded files and playlists, and the type of access you want them to have. You can grant permissions to a maximum of 30 users and/or predefined Amazon S3 groups. If you include Permissions , Elastic Transcoder grants only the permissions that you specify. It does not grant full permissions to the owner of the role specified by Role . If you want that user to have full control, you must explicitly grant full control to the user. If you omit Permissions , Elastic Transcoder grants full control over the transcoded files and playlists to the owner of the role specified by Role , and grants no other permissions to any other user or group.

pocBucket :: Lens' PipelineOutputConfig (Maybe Text) Source #

The Amazon S3 bucket in which you want Elastic Transcoder to save the transcoded files. Specify this value when all of the following are true: * You want to save transcoded files, thumbnails (if any), and playlists (if any) together in one bucket. * You do not want to specify the users or groups who have access to the transcoded files, thumbnails, and playlists. * You do not want to specify the permissions that Elastic Transcoder grants to the files. * You want to associate the transcoded files and thumbnails with the Amazon S3 Standard storage class. If you want to save transcoded files and playlists in one bucket and thumbnails in another bucket, to specify which users can access the transcoded files or the permissions those users have, or to change the Amazon S3 storage class, omit OutputBucket and specify values for ContentConfig and ThumbnailConfig instead.

pocStorageClass :: Lens' PipelineOutputConfig (Maybe Text) Source #

The Amazon S3 storage class, Standard or ReducedRedundancy , that you want Elastic Transcoder to assign to the video files and playlists that it stores in your Amazon S3 bucket.

pocPermissions :: Lens' PipelineOutputConfig [Permission] Source #

Optional. The Permissions object specifies which users and/or predefined Amazon S3 groups you want to have access to transcoded files and playlists, and the type of access you want them to have. You can grant permissions to a maximum of 30 users and/or predefined Amazon S3 groups. If you include Permissions , Elastic Transcoder grants only the permissions that you specify. It does not grant full permissions to the owner of the role specified by Role . If you want that user to have full control, you must explicitly grant full control to the user. If you omit Permissions , Elastic Transcoder grants full control over the transcoded files and playlists to the owner of the role specified by Role , and grants no other permissions to any other user or group.
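
A hedged construction sketch, assuming OverloadedStrings and the Control.Lens operators (&) and (?~): building a PipelineOutputConfig with the pipelineOutputConfig smart constructor and the lenses above. The bucket name is a placeholder, and Permissions is deliberately left empty so Elastic Transcoder grants full control to the owner of the pipeline's Role.

{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (?~))
import Network.AWS.ElasticTranscoder.Types

-- Content configuration that stores output in a hypothetical bucket using
-- the Standard storage class.
myContentConfig :: PipelineOutputConfig
myContentConfig = pipelineOutputConfig
  & pocBucket       ?~ "my-transcoded-media"  -- placeholder bucket name
  & pocStorageClass ?~ "Standard"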

PlayReadyDrm

data PlayReadyDrm Source #

The PlayReady DRM settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

PlayReady DRM encrypts your media files using AES-CTR encryption.

If you use DRM for an HLSv3 playlist, your outputs must have a master playlist.

See: playReadyDrm smart constructor.

Instances

Eq PlayReadyDrm Source # 
Data PlayReadyDrm Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> PlayReadyDrm -> c PlayReadyDrm #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c PlayReadyDrm #

toConstr :: PlayReadyDrm -> Constr #

dataTypeOf :: PlayReadyDrm -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c PlayReadyDrm) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c PlayReadyDrm) #

gmapT :: (forall b. Data b => b -> b) -> PlayReadyDrm -> PlayReadyDrm #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> PlayReadyDrm -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> PlayReadyDrm -> r #

gmapQ :: (forall d. Data d => d -> u) -> PlayReadyDrm -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> PlayReadyDrm -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> PlayReadyDrm -> m PlayReadyDrm #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> PlayReadyDrm -> m PlayReadyDrm #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> PlayReadyDrm -> m PlayReadyDrm #

Read PlayReadyDrm Source # 
Show PlayReadyDrm Source # 
Generic PlayReadyDrm Source # 

Associated Types

type Rep PlayReadyDrm :: * -> * #

Hashable PlayReadyDrm Source # 
ToJSON PlayReadyDrm Source # 
FromJSON PlayReadyDrm Source # 
NFData PlayReadyDrm Source # 

Methods

rnf :: PlayReadyDrm -> () #

type Rep PlayReadyDrm Source # 
type Rep PlayReadyDrm = D1 * (MetaData "PlayReadyDrm" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "PlayReadyDrm'" PrefixI True) ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_prdKeyId") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) ((:*:) * (S1 * (MetaSel (Just Symbol "_prdFormat") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_prdKeyMD5") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))))) ((:*:) * (S1 * (MetaSel (Just Symbol "_prdKey") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) ((:*:) * (S1 * (MetaSel (Just Symbol "_prdInitializationVector") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_prdLicenseAcquisitionURL") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text)))))))

playReadyDrm :: PlayReadyDrm Source #

Creates a value of PlayReadyDrm with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • prdKeyId - The ID for your DRM key, so that your DRM license provider knows which key to provide. The key ID must be provided in big endian, and Elastic Transcoder converts it to little endian before inserting it into the PlayReady DRM headers. If you are unsure whether your license server provides your key ID in big or little endian, check with your DRM provider.
  • prdFormat - The type of DRM, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.
  • prdKeyMD5 - The MD5 digest of the key used for DRM on your file, and that you want Elastic Transcoder to use as a checksum to make sure your key was not corrupted in transit. The key MD5 must be base64-encoded, and it must be exactly 16 bytes before being base64-encoded.
  • prdKey - The DRM key for your file, provided by your DRM license provider. The key must be base64-encoded, and it must be one of the following bit lengths before being base64-encoded: 128 , 192 , or 256 . The key must also be encrypted by using AWS KMS.
  • prdInitializationVector - The series of random bits created by a random bit generator, unique for every encryption operation, that you want Elastic Transcoder to use to encrypt your files. The initialization vector must be base64-encoded, and it must be exactly 8 bytes long before being base64-encoded. If no initialization vector is provided, Elastic Transcoder generates one for you.
  • prdLicenseAcquisitionURL - The location of the license key required to play DRM content. The URL must be an absolute path, and is referenced by the PlayReady header. The PlayReady header is referenced in the protection header of the client manifest for Smooth Streaming outputs, and in the EXT-X-DXDRM and EXT-XDXDRMINFO metadata tags for HLS playlist outputs. An example URL looks like this: https://www.example.com/exampleKey/

prdKeyId :: Lens' PlayReadyDrm (Maybe Text) Source #

The ID for your DRM key, so that your DRM license provider knows which key to provide. The key ID must be provided in big endian, and Elastic Transcoder converts it to little endian before inserting it into the PlayReady DRM headers. If you are unsure whether your license server provides your key ID in big or little endian, check with your DRM provider.

prdFormat :: Lens' PlayReadyDrm (Maybe Text) Source #

The type of DRM, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

prdKeyMD5 :: Lens' PlayReadyDrm (Maybe Text) Source #

The MD5 digest of the key used for DRM on your file, and that you want Elastic Transcoder to use as a checksum to make sure your key was not corrupted in transit. The key MD5 must be base64-encoded, and it must be exactly 16 bytes before being base64-encoded.

prdKey :: Lens' PlayReadyDrm (Maybe Text) Source #

The DRM key for your file, provided by your DRM license provider. The key must be base64-encoded, and it must be one of the following bit lengths before being base64-encoded: 128 , 192 , or 256 . The key must also be encrypted by using AWS KMS.

prdInitializationVector :: Lens' PlayReadyDrm (Maybe Text) Source #

The series of random bits created by a random bit generator, unique for every encryption operation, that you want Elastic Transcoder to use to encrypt your files. The initialization vector must be base64-encoded, and it must be exactly 8 bytes long before being base64-encoded. If no initialization vector is provided, Elastic Transcoder generates one for you.

prdLicenseAcquisitionURL :: Lens' PlayReadyDrm (Maybe Text) Source #

The location of the license key required to play DRM content. The URL must be an absolute path, and is referenced by the PlayReady header. The PlayReady header is referenced in the protection header of the client manifest for Smooth Streaming outputs, and in the EXT-X-DXDRM and EXT-XDXDRMINFO metadata tags for HLS playlist outputs. An example URL looks like this: https://www.example.com/exampleKey/
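
A hedged sketch of wiring up PlayReadyDrm with the playReadyDrm smart constructor; every value below is a placeholder and must be replaced with your DRM provider's real, base64-encoded material (the key itself must also be encrypted with AWS KMS, as noted above).

{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (?~))
import Network.AWS.ElasticTranscoder.Types

myPlayReadyDrm :: PlayReadyDrm
myPlayReadyDrm = playReadyDrm
  & prdKeyId                 ?~ "01234567-89ab-cdef-0123-456789abcdef"  -- placeholder
  & prdKey                   ?~ "base64-encoded-key"                    -- placeholder
  & prdKeyMD5                ?~ "base64-encoded-16-byte-md5"            -- placeholder
  & prdInitializationVector  ?~ "base64-encoded-8-byte-iv"              -- placeholder
  & prdLicenseAcquisitionURL ?~ "https://www.example.com/exampleKey/"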

Playlist

data Playlist Source #

Use Only for Fragmented MP4 or MPEG-TS Outputs. If you specify a preset for which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), Playlists contains information about the master playlists that you want Elastic Transcoder to create. We recommend that you create only one master playlist per output format. The maximum number of master playlists in a job is 30.

See: playlist smart constructor.

Instances

Eq Playlist Source # 
Data Playlist Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> Playlist -> c Playlist #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c Playlist #

toConstr :: Playlist -> Constr #

dataTypeOf :: Playlist -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c Playlist) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c Playlist) #

gmapT :: (forall b. Data b => b -> b) -> Playlist -> Playlist #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> Playlist -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> Playlist -> r #

gmapQ :: (forall d. Data d => d -> u) -> Playlist -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> Playlist -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> Playlist -> m Playlist #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> Playlist -> m Playlist #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> Playlist -> m Playlist #

Read Playlist Source # 
Show Playlist Source # 
Generic Playlist Source # 

Associated Types

type Rep Playlist :: * -> * #

Methods

from :: Playlist -> Rep Playlist x #

to :: Rep Playlist x -> Playlist #

Hashable Playlist Source # 

Methods

hashWithSalt :: Int -> Playlist -> Int #

hash :: Playlist -> Int #

FromJSON Playlist Source # 
NFData Playlist Source # 

Methods

rnf :: Playlist -> () #

type Rep Playlist Source # 

playlist :: Playlist Source #

Creates a value of Playlist with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • pStatus - The status of the job with which the playlist is associated.
  • pPlayReadyDrm - The DRM settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.
  • pFormat - The format of the output playlist. Valid formats include HLSv3 , HLSv4 , and Smooth .
  • pOutputKeys - For each output in this job that you want to include in a master playlist, the value of the Outputs:Key object. * If your output is not HLS or does not have a segment duration set, the name of the output file is a concatenation of OutputKeyPrefix and Outputs:Key : OutputKeyPrefixOutputs:Key * If your output is HLSv3 and has a segment duration set, or is not included in a playlist, Elastic Transcoder creates an output playlist file with a file extension of .m3u8 , and a series of .ts files that include a five-digit sequential counter beginning with 00000: OutputKeyPrefixOutputs:Key .m3u8 OutputKeyPrefixOutputs:Key 00000.ts * If your output is HLSv4 , has a segment duration set, and is included in an HLSv4 playlist, Elastic Transcoder creates an output playlist file with a file extension of _v4.m3u8 . If the output is video, Elastic Transcoder also creates an output file with an extension of _iframe.m3u8 : OutputKeyPrefixOutputs:Key _v4.m3u8 OutputKeyPrefixOutputs:Key _iframe.m3u8 OutputKeyPrefixOutputs:Key .ts Elastic Transcoder automatically appends the relevant file extension to the file name. If you include a file extension in Output Key, the file name will have two extensions. If you include more than one output in a playlist, any segment duration settings, clip settings, or caption settings must be the same for all outputs in the playlist. For Smooth playlists, the Audio:Profile , Video:Profile , and Video:FrameRate to Video:KeyframesMaxDist ratio must be the same for all outputs.
  • pName - The name that you want Elastic Transcoder to assign to the master playlist, for example, nyc-vacation.m3u8. If the name includes a / character, the section of the name before the last / must be identical for all Name objects. If you create more than one master playlist, the values of all Name objects must be unique.
  • pStatusDetail - Information that further explains the status.
  • pHlsContentProtection - The HLS content protection settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

pStatus :: Lens' Playlist (Maybe Text) Source #

The status of the job with which the playlist is associated.

pPlayReadyDrm :: Lens' Playlist (Maybe PlayReadyDrm) Source #

The DRM settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

pFormat :: Lens' Playlist (Maybe Text) Source #

The format of the output playlist. Valid formats include HLSv3 , HLSv4 , and Smooth .

pOutputKeys :: Lens' Playlist [Text] Source #

For each output in this job that you want to include in a master playlist, the value of the Outputs:Key object. * If your output is not HLS or does not have a segment duration set, the name of the output file is a concatenation of OutputKeyPrefix and Outputs:Key : OutputKeyPrefixOutputs:Key * If your output is HLSv3 and has a segment duration set, or is not included in a playlist, Elastic Transcoder creates an output playlist file with a file extension of .m3u8 , and a series of .ts files that include a five-digit sequential counter beginning with 00000: OutputKeyPrefixOutputs:Key .m3u8 OutputKeyPrefixOutputs:Key 00000.ts * If your output is HLSv4 , has a segment duration set, and is included in an HLSv4 playlist, Elastic Transcoder creates an output playlist file with a file extension of _v4.m3u8 . If the output is video, Elastic Transcoder also creates an output file with an extension of _iframe.m3u8 : OutputKeyPrefixOutputs:Key _v4.m3u8 OutputKeyPrefixOutputs:Key _iframe.m3u8 OutputKeyPrefixOutputs:Key .ts Elastic Transcoder automatically appends the relevant file extension to the file name. If you include a file extension in Output Key, the file name will have two extensions. If you include more than one output in a playlist, any segment duration settings, clip settings, or caption settings must be the same for all outputs in the playlist. For Smooth playlists, the Audio:Profile , Video:Profile , and Video:FrameRate to Video:KeyframesMaxDist ratio must be the same for all outputs.

pName :: Lens' Playlist (Maybe Text) Source #

The name that you want Elastic Transcoder to assign to the master playlist, for example, nyc-vacation.m3u8. If the name includes a / character, the section of the name before the last / must be identical for all Name objects. If you create more than one master playlist, the values of all Name objects must be unique.

pStatusDetail :: Lens' Playlist (Maybe Text) Source #

Information that further explains the status.

pHlsContentProtection :: Lens' Playlist (Maybe HlsContentProtection) Source #

The HLS content protection settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.
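
Playlist values are typically read back from job responses rather than built by hand; here is a small hedged sketch of inspecting one with the lenses above (the helper name playlistSummary is illustrative only).

import Control.Lens ((^.))
import Data.Text (Text)
import Network.AWS.ElasticTranscoder.Types

-- The master playlist's name, format, and status, if set.
playlistSummary :: Playlist -> (Maybe Text, Maybe Text, Maybe Text)
playlistSummary pl = (pl ^. pName, pl ^. pFormat, pl ^. pStatus)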

Preset

data Preset Source #

Presets are templates that contain most of the settings for transcoding media files from one format to another. Elastic Transcoder includes some default presets for common formats, for example, several iPod and iPhone versions. You can also create your own presets for formats that aren't included among the default presets. You specify which preset you want to use when you create a job.

See: preset smart constructor.

Instances

Eq Preset Source # 

Methods

(==) :: Preset -> Preset -> Bool #

(/=) :: Preset -> Preset -> Bool #

Data Preset Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> Preset -> c Preset #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c Preset #

toConstr :: Preset -> Constr #

dataTypeOf :: Preset -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c Preset) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c Preset) #

gmapT :: (forall b. Data b => b -> b) -> Preset -> Preset #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> Preset -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> Preset -> r #

gmapQ :: (forall d. Data d => d -> u) -> Preset -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> Preset -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> Preset -> m Preset #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> Preset -> m Preset #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> Preset -> m Preset #

Read Preset Source # 
Show Preset Source # 
Generic Preset Source # 

Associated Types

type Rep Preset :: * -> * #

Methods

from :: Preset -> Rep Preset x #

to :: Rep Preset x -> Preset #

Hashable Preset Source # 

Methods

hashWithSalt :: Int -> Preset -> Int #

hash :: Preset -> Int #

FromJSON Preset Source # 
NFData Preset Source # 

Methods

rnf :: Preset -> () #

type Rep Preset Source # 

preset :: Preset Source #

Creates a value of Preset with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • preARN - The Amazon Resource Name (ARN) for the preset.
  • preVideo - A section of the response body that provides information about the video preset values.
  • preThumbnails - A section of the response body that provides information about the thumbnail preset values, if any.
  • preName - The name of the preset.
  • preContainer - The container type for the output file. Valid values include flac , flv , fmp4 , gif , mp3 , mp4 , mpg , mxf , oga , ogg , ts , and webm .
  • preId - Identifier for the new preset. You use this value to get settings for the preset or to delete it.
  • preType - Whether the preset is a default preset provided by Elastic Transcoder (System ) or a preset that you have defined (Custom ).
  • preDescription - A description of the preset.
  • preAudio - A section of the response body that provides information about the audio preset values.

preARN :: Lens' Preset (Maybe Text) Source #

The Amazon Resource Name (ARN) for the preset.

preVideo :: Lens' Preset (Maybe VideoParameters) Source #

A section of the response body that provides information about the video preset values.

preThumbnails :: Lens' Preset (Maybe Thumbnails) Source #

A section of the response body that provides information about the thumbnail preset values, if any.

preName :: Lens' Preset (Maybe Text) Source #

The name of the preset.

preContainer :: Lens' Preset (Maybe Text) Source #

The container type for the output file. Valid values include flac , flv , fmp4 , gif , mp3 , mp4 , mpg , mxf , oga , ogg , ts , and webm .

preId :: Lens' Preset (Maybe Text) Source #

Identifier for the new preset. You use this value to get settings for the preset or to delete it.

preType :: Lens' Preset (Maybe Text) Source #

Whether the preset is a default preset provided by Elastic Transcoder (System ) or a preset that you have defined (Custom ).

preDescription :: Lens' Preset (Maybe Text) Source #

A description of the preset.

preAudio :: Lens' Preset (Maybe AudioParameters) Source #

A section of the response body that provides information about the audio preset values.
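
A hedged sketch of filtering presets returned by, for example, ListPresets, using the lenses above; the helper name and the literal values are assumptions chosen only for illustration.

{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((^.))
import Network.AWS.ElasticTranscoder.Types

-- Keep only the default (System) presets whose container is mp4.
systemMp4Presets :: [Preset] -> [Preset]
systemMp4Presets = filter $ \p ->
  p ^. preContainer == Just "mp4" && p ^. preType == Just "System"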

PresetWatermark

data PresetWatermark Source #

Settings for the size, location, and opacity of graphics that you want Elastic Transcoder to overlay over videos that are transcoded using this preset. You can specify settings for up to four watermarks. Watermarks appear in the specified size and location, and with the specified opacity for the duration of the transcoded video.

Watermarks can be in .png or .jpg format. If you want to display a watermark that is not rectangular, use the .png format, which supports transparency.

When you create a job that uses this preset, you specify the .png or .jpg graphics that you want Elastic Transcoder to include in the transcoded videos. You can specify fewer graphics in the job than you specify watermark settings in the preset, which allows you to use the same preset for up to four watermarks that have different dimensions.

See: presetWatermark smart constructor.

Instances

Eq PresetWatermark Source # 
Data PresetWatermark Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> PresetWatermark -> c PresetWatermark #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c PresetWatermark #

toConstr :: PresetWatermark -> Constr #

dataTypeOf :: PresetWatermark -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c PresetWatermark) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c PresetWatermark) #

gmapT :: (forall b. Data b => b -> b) -> PresetWatermark -> PresetWatermark #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> PresetWatermark -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> PresetWatermark -> r #

gmapQ :: (forall d. Data d => d -> u) -> PresetWatermark -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> PresetWatermark -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> PresetWatermark -> m PresetWatermark #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> PresetWatermark -> m PresetWatermark #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> PresetWatermark -> m PresetWatermark #

Read PresetWatermark Source # 
Show PresetWatermark Source # 
Generic PresetWatermark Source # 
Hashable PresetWatermark Source # 
ToJSON PresetWatermark Source # 
FromJSON PresetWatermark Source # 
NFData PresetWatermark Source # 

Methods

rnf :: PresetWatermark -> () #

type Rep PresetWatermark Source # 

presetWatermark :: PresetWatermark Source #

Creates a value of PresetWatermark with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • pwVerticalAlign - The vertical position of the watermark unless you specify a non-zero value for VerticalOffset : * Top : The top edge of the watermark is aligned with the top border of the video. * Bottom : The bottom edge of the watermark is aligned with the bottom border of the video. * Center : The watermark is centered between the top and bottom borders.
  • pwSizingPolicy - A value that controls scaling of the watermark: * Fit : Elastic Transcoder scales the watermark so it matches the value that you specified in either MaxWidth or MaxHeight without exceeding the other value. * Stretch : Elastic Transcoder stretches the watermark to match the values that you specified for MaxWidth and MaxHeight . If the relative proportions of the watermark and the values of MaxWidth and MaxHeight are different, the watermark will be distorted. * ShrinkToFit : Elastic Transcoder scales the watermark down so that its dimensions match the values that you specified for at least one of MaxWidth and MaxHeight without exceeding either value. If you specify this option, Elastic Transcoder does not scale the watermark up.
  • pwHorizontalOffset - The amount by which you want the horizontal position of the watermark to be offset from the position specified by HorizontalAlign: * number of pixels (px): The minimum value is 0 pixels, and the maximum value is the value of MaxWidth. * integer percentage (%): The range of valid values is 0 to 100. For example, if you specify Left for HorizontalAlign and 5px for HorizontalOffset , the left side of the watermark appears 5 pixels from the left border of the output video. HorizontalOffset is only valid when the value of HorizontalAlign is Left or Right . If you specify an offset that causes the watermark to extend beyond the left or right border and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic Transcoder has added black bars, the watermark extends into the black bars. If the watermark extends beyond the black bars, it is cropped. Use the value of Target to specify whether you want to include the black bars that are added by Elastic Transcoder, if any, in the offset calculation.
  • pwMaxHeight - The maximum height of the watermark in one of the following formats: * number of pixels (px): The minimum value is 16 pixels, and the maximum value is the value of MaxHeight . * integer percentage (%): The range of valid values is 0 to 100. Use the value of Target to specify whether you want Elastic Transcoder to include the black bars that are added by Elastic Transcoder, if any, in the calculation. If you specify the value in pixels, it must be less than or equal to the value of MaxHeight .
  • pwOpacity - A percentage that indicates how much you want a watermark to obscure the video in the location where it appears. Valid values are 0 (the watermark is invisible) to 100 (the watermark completely obscures the video in the specified location). The datatype of Opacity is float. Elastic Transcoder supports transparent .png graphics. If you use a transparent .png, the transparent portion of the video appears as if you had specified a value of 0 for Opacity . The .jpg file format doesn't support transparency.
  • pwVerticalOffset - The amount by which you want the vertical position of the watermark to be offset from the position specified by VerticalAlign: * number of pixels (px): The minimum value is 0 pixels, and the maximum value is the value of MaxHeight . * integer percentage (%): The range of valid values is 0 to 100. For example, if you specify Top for VerticalAlign and 5px for VerticalOffset , the top of the watermark appears 5 pixels from the top border of the output video. VerticalOffset is only valid when the value of VerticalAlign is Top or Bottom. If you specify an offset that causes the watermark to extend beyond the top or bottom border and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic Transcoder has added black bars, the watermark extends into the black bars. If the watermark extends beyond the black bars, it is cropped. Use the value of Target to specify whether you want Elastic Transcoder to include the black bars that are added by Elastic Transcoder, if any, in the offset calculation.
  • pwMaxWidth - The maximum width of the watermark in one of the following formats: * number of pixels (px): The minimum value is 16 pixels, and the maximum value is the value of MaxWidth . * integer percentage (%): The range of valid values is 0 to 100. Use the value of Target to specify whether you want Elastic Transcoder to include the black bars that are added by Elastic Transcoder, if any, in the calculation. If you specify the value in pixels, it must be less than or equal to the value of MaxWidth .
  • pwId - A unique identifier for the settings for one watermark. The value of Id can be up to 40 characters long.
  • pwHorizontalAlign - The horizontal position of the watermark unless you specify a non-zero value for HorizontalOffset : * Left : The left edge of the watermark is aligned with the left border of the video. * Right : The right edge of the watermark is aligned with the right border of the video. * Center : The watermark is centered between the left and right borders.
  • pwTarget - A value that determines how Elastic Transcoder interprets values that you specified for HorizontalOffset , VerticalOffset , MaxWidth , and MaxHeight : * Content : HorizontalOffset and VerticalOffset values are calculated based on the borders of the video excluding black bars added by Elastic Transcoder, if any. In addition, MaxWidth and MaxHeight , if specified as a percentage, are calculated based on the borders of the video excluding black bars added by Elastic Transcoder, if any. * Frame : HorizontalOffset and VerticalOffset values are calculated based on the borders of the video including black bars added by Elastic Transcoder, if any. In addition, MaxWidth and MaxHeight , if specified as a percentage, are calculated based on the borders of the video including black bars added by Elastic Transcoder, if any.

pwVerticalAlign :: Lens' PresetWatermark (Maybe Text) Source #

The vertical position of the watermark unless you specify a non-zero value for VerticalOffset : * Top : The top edge of the watermark is aligned with the top border of the video. * Bottom : The bottom edge of the watermark is aligned with the bottom border of the video. * Center : The watermark is centered between the top and bottom borders.

pwSizingPolicy :: Lens' PresetWatermark (Maybe Text) Source #

A value that controls scaling of the watermark: * Fit : Elastic Transcoder scales the watermark so it matches the value that you specified in either MaxWidth or MaxHeight without exceeding the other value. * Stretch : Elastic Transcoder stretches the watermark to match the values that you specified for MaxWidth and MaxHeight . If the relative proportions of the watermark and the values of MaxWidth and MaxHeight are different, the watermark will be distorted. * ShrinkToFit : Elastic Transcoder scales the watermark down so that its dimensions match the values that you specified for at least one of MaxWidth and MaxHeight without exceeding either value. If you specify this option, Elastic Transcoder does not scale the watermark up.

pwHorizontalOffset :: Lens' PresetWatermark (Maybe Text) Source #

The amount by which you want the horizontal position of the watermark to be offset from the position specified by HorizontalAlign: * number of pixels (px): The minimum value is 0 pixels, and the maximum value is the value of MaxWidth. * integer percentage (%): The range of valid values is 0 to 100. For example, if you specify Left for HorizontalAlign and 5px for HorizontalOffset , the left side of the watermark appears 5 pixels from the left border of the output video. HorizontalOffset is only valid when the value of HorizontalAlign is Left or Right . If you specify an offset that causes the watermark to extend beyond the left or right border and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic Transcoder has added black bars, the watermark extends into the black bars. If the watermark extends beyond the black bars, it is cropped. Use the value of Target to specify whether you want to include the black bars that are added by Elastic Transcoder, if any, in the offset calculation.

pwMaxHeight :: Lens' PresetWatermark (Maybe Text) Source #

The maximum height of the watermark in one of the following formats: * number of pixels (px): The minimum value is 16 pixels, and the maximum value is the value of MaxHeight . * integer percentage (%): The range of valid values is 0 to 100. Use the value of Target to specify whether you want Elastic Transcoder to include the black bars that are added by Elastic Transcoder, if any, in the calculation. If you specify the value in pixels, it must be less than or equal to the value of MaxHeight .

pwOpacity :: Lens' PresetWatermark (Maybe Text) Source #

A percentage that indicates how much you want a watermark to obscure the video in the location where it appears. Valid values are 0 (the watermark is invisible) to 100 (the watermark completely obscures the video in the specified location). The datatype of Opacity is float. Elastic Transcoder supports transparent .png graphics. If you use a transparent .png, the transparent portion of the video appears as if you had specified a value of 0 for Opacity . The .jpg file format doesn't support transparency.

pwVerticalOffset :: Lens' PresetWatermark (Maybe Text) Source #

The amount by which you want the vertical position of the watermark to be offset from the position specified by VerticalAlign: * number of pixels (px): The minimum value is 0 pixels, and the maximum value is the value of MaxHeight . * integer percentage (%): The range of valid values is 0 to 100. For example, if you specify Top for VerticalAlign and 5px for VerticalOffset , the top of the watermark appears 5 pixels from the top border of the output video. VerticalOffset is only valid when the value of VerticalAlign is Top or Bottom. If you specify an offset that causes the watermark to extend beyond the top or bottom border and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic Transcoder has added black bars, the watermark extends into the black bars. If the watermark extends beyond the black bars, it is cropped. Use the value of Target to specify whether you want Elastic Transcoder to include the black bars that are added by Elastic Transcoder, if any, in the offset calculation.

pwMaxWidth :: Lens' PresetWatermark (Maybe Text) Source #

The maximum width of the watermark in one of the following formats: * number of pixels (px): The minimum value is 16 pixels, and the maximum value is the value of MaxWidth . * integer percentage (%): The range of valid values is 0 to 100. Use the value of Target to specify whether you want Elastic Transcoder to include the black bars that are added by Elastic Transcoder, if any, in the calculation. If you specify the value in pixels, it must be less than or equal to the value of MaxWidth .

pwId :: Lens' PresetWatermark (Maybe Text) Source #

A unique identifier for the settings for one watermark. The value of Id can be up to 40 characters long.

pwHorizontalAlign :: Lens' PresetWatermark (Maybe Text) Source #

The horizontal position of the watermark unless you specify a non-zero value for HorizontalOffset : * Left : The left edge of the watermark is aligned with the left border of the video. * Right : The right edge of the watermark is aligned with the right border of the video. * Center : The watermark is centered between the left and right borders.

pwTarget :: Lens' PresetWatermark (Maybe Text) Source #

A value that determines how Elastic Transcoder interprets values that you specified for HorizontalOffset , VerticalOffset , MaxWidth , and MaxHeight : * Content : HorizontalOffset and VerticalOffset values are calculated based on the borders of the video excluding black bars added by Elastic Transcoder, if any. In addition, MaxWidth and MaxHeight , if specified as a percentage, are calculated based on the borders of the video excluding black bars added by Elastic Transcoder, if any. * Frame : HorizontalOffset and VerticalOffset values are calculated based on the borders of the video including black bars added by Elastic Transcoder, if any. In addition, MaxWidth and MaxHeight , if specified as a percentage, are calculated based on the borders of the video including black bars added by Elastic Transcoder, if any.
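
A hedged sketch of a watermark definition built with the presetWatermark smart constructor; the identifier and the size, alignment, and opacity values are placeholders chosen only to illustrate the lenses above.

{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (?~))
import Network.AWS.ElasticTranscoder.Types

-- A logo pinned to the bottom-right corner at 10% of the frame size and
-- 50% opacity, measured against the video content (excluding black bars).
logoWatermark :: PresetWatermark
logoWatermark = presetWatermark
  & pwId              ?~ "bottom-right-logo"
  & pwMaxWidth        ?~ "10%"
  & pwMaxHeight       ?~ "10%"
  & pwSizingPolicy    ?~ "ShrinkToFit"
  & pwHorizontalAlign ?~ "Right"
  & pwVerticalAlign   ?~ "Bottom"
  & pwOpacity         ?~ "50"
  & pwTarget          ?~ "Content"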

Thumbnails

data Thumbnails Source #

Thumbnails for videos.

See: thumbnails smart constructor.

Instances

Eq Thumbnails Source # 
Data Thumbnails Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> Thumbnails -> c Thumbnails #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c Thumbnails #

toConstr :: Thumbnails -> Constr #

dataTypeOf :: Thumbnails -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c Thumbnails) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c Thumbnails) #

gmapT :: (forall b. Data b => b -> b) -> Thumbnails -> Thumbnails #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> Thumbnails -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> Thumbnails -> r #

gmapQ :: (forall d. Data d => d -> u) -> Thumbnails -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> Thumbnails -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> Thumbnails -> m Thumbnails #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> Thumbnails -> m Thumbnails #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> Thumbnails -> m Thumbnails #

Read Thumbnails Source # 
Show Thumbnails Source # 
Generic Thumbnails Source # 

Associated Types

type Rep Thumbnails :: * -> * #

Hashable Thumbnails Source # 
ToJSON Thumbnails Source # 
FromJSON Thumbnails Source # 
NFData Thumbnails Source # 

Methods

rnf :: Thumbnails -> () #

type Rep Thumbnails Source # 

thumbnails :: Thumbnails Source #

Creates a value of Thumbnails with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • tSizingPolicy - Specify one of the following values to control scaling of thumbnails: * Fit : Elastic Transcoder scales thumbnails so they match the value that you specified in thumbnail MaxWidth or MaxHeight settings without exceeding the other value. * Fill : Elastic Transcoder scales thumbnails so they match the value that you specified in thumbnail MaxWidth or MaxHeight settings and matches or exceeds the other value. Elastic Transcoder centers the image in thumbnails and then crops in the dimension (if any) that exceeds the maximum value. * Stretch : Elastic Transcoder stretches thumbnails to match the values that you specified for thumbnail MaxWidth and MaxHeight settings. If the relative proportions of the input video and thumbnails are different, the thumbnails will be distorted. * Keep : Elastic Transcoder does not scale thumbnails. If either dimension of the input video exceeds the values that you specified for thumbnail MaxWidth and MaxHeight settings, Elastic Transcoder crops the thumbnails. * ShrinkToFit : Elastic Transcoder scales thumbnails down so that their dimensions match the values that you specified for at least one of thumbnail MaxWidth and MaxHeight without exceeding either value. If you specify this option, Elastic Transcoder does not scale thumbnails up. * ShrinkToFill : Elastic Transcoder scales thumbnails down so that their dimensions match the values that you specified for at least one of MaxWidth and MaxHeight without dropping below either value. If you specify this option, Elastic Transcoder does not scale thumbnails up.
  • tFormat - The format of thumbnails, if any. Valid values are jpg and png . You specify whether you want Elastic Transcoder to create thumbnails when you create a job.
  • tMaxHeight - The maximum height of thumbnails in pixels. If you specify auto, Elastic Transcoder uses 1080 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 3072.
  • tResolution - Important: To better control resolution and aspect ratio of thumbnails, we recommend that you use the values MaxWidth , MaxHeight , SizingPolicy , and PaddingPolicy instead of Resolution and AspectRatio . The two groups of settings are mutually exclusive. Do not use them together. The width and height of thumbnail files in pixels. Specify a value in the format width x height where both values are even integers. The values cannot exceed the width and height that you specified in the Video:Resolution object.
  • tAspectRatio - Important: To better control resolution and aspect ratio of thumbnails, we recommend that you use the values MaxWidth , MaxHeight , SizingPolicy , and PaddingPolicy instead of Resolution and AspectRatio . The two groups of settings are mutually exclusive. Do not use them together. The aspect ratio of thumbnails. Valid values include: auto , 1:1 , 4:3 , 3:2 , 16:9 If you specify auto , Elastic Transcoder tries to preserve the aspect ratio of the video in the output file.
  • tPaddingPolicy - When you set PaddingPolicy to Pad , Elastic Transcoder may add black bars to the top and bottom and/or left and right sides of thumbnails to make the total size of the thumbnails match the values that you specified for thumbnail MaxWidth and MaxHeight settings.
  • tInterval - The approximate number of seconds between thumbnails. Specify an integer value.
  • tMaxWidth - The maximum width of thumbnails in pixels. If you specify auto, Elastic Transcoder uses 1920 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 4096.

tSizingPolicy :: Lens' Thumbnails (Maybe Text) Source #

Specify one of the following values to control scaling of thumbnails: * Fit : Elastic Transcoder scales thumbnails so they match the value that you specified in thumbnail MaxWidth or MaxHeight settings without exceeding the other value. * Fill : Elastic Transcoder scales thumbnails so they match the value that you specified in thumbnail MaxWidth or MaxHeight settings and matches or exceeds the other value. Elastic Transcoder centers the image in thumbnails and then crops in the dimension (if any) that exceeds the maximum value. * Stretch : Elastic Transcoder stretches thumbnails to match the values that you specified for thumbnail MaxWidth and MaxHeight settings. If the relative proportions of the input video and thumbnails are different, the thumbnails will be distorted. * Keep : Elastic Transcoder does not scale thumbnails. If either dimension of the input video exceeds the values that you specified for thumbnail MaxWidth and MaxHeight settings, Elastic Transcoder crops the thumbnails. * ShrinkToFit : Elastic Transcoder scales thumbnails down so that their dimensions match the values that you specified for at least one of thumbnail MaxWidth and MaxHeight without exceeding either value. If you specify this option, Elastic Transcoder does not scale thumbnails up. * ShrinkToFill : Elastic Transcoder scales thumbnails down so that their dimensions match the values that you specified for at least one of MaxWidth and MaxHeight without dropping below either value. If you specify this option, Elastic Transcoder does not scale thumbnails up.

tFormat :: Lens' Thumbnails (Maybe Text) Source #

The format of thumbnails, if any. Valid values are jpg and png . You specify whether you want Elastic Transcoder to create thumbnails when you create a job.

tMaxHeight :: Lens' Thumbnails (Maybe Text) Source #

The maximum height of thumbnails in pixels. If you specify auto, Elastic Transcoder uses 1080 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 3072.

tResolution :: Lens' Thumbnails (Maybe Text) Source #

Important: To better control resolution and aspect ratio of thumbnails, we recommend that you use the values MaxWidth , MaxHeight , SizingPolicy , and PaddingPolicy instead of Resolution and AspectRatio . The two groups of settings are mutually exclusive. Do not use them together. The width and height of thumbnail files in pixels. Specify a value in the format width x height where both values are even integers. The values cannot exceed the width and height that you specified in the Video:Resolution object.

tAspectRatio :: Lens' Thumbnails (Maybe Text) Source #

Important: To better control resolution and aspect ratio of thumbnails, we recommend that you use the values MaxWidth , MaxHeight , SizingPolicy , and PaddingPolicy instead of Resolution and AspectRatio . The two groups of settings are mutually exclusive. Do not use them together. The aspect ratio of thumbnails. Valid values include: auto , 1:1 , 4:3 , 3:2 , 16:9 If you specify auto , Elastic Transcoder tries to preserve the aspect ratio of the video in the output file.

tPaddingPolicy :: Lens' Thumbnails (Maybe Text) Source #

When you set PaddingPolicy to Pad , Elastic Transcoder may add black bars to the top and bottom and/or left and right sides of thumbnails to make the total size of the thumbnails match the values that you specified for thumbnail MaxWidth and MaxHeight settings.

tInterval :: Lens' Thumbnails (Maybe Text) Source #

The approximate number of seconds between thumbnails. Specify an integer value.

tMaxWidth :: Lens' Thumbnails (Maybe Text) Source #

The maximum width of thumbnails in pixels. If you specify auto, Elastic Transcoder uses 1920 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 4096.
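
A hedged sketch of a Thumbnails configuration using the thumbnails smart constructor; the dimensions and interval are placeholders, picked to satisfy the constraints described above (even integers within the documented ranges).

{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (?~))
import Network.AWS.ElasticTranscoder.Types

-- PNG thumbnails roughly once a minute, scaled down to fit 640x360 and
-- padded with black bars if the aspect ratios differ.
myThumbnails :: Thumbnails
myThumbnails = thumbnails
  & tFormat        ?~ "png"
  & tInterval      ?~ "60"
  & tMaxWidth      ?~ "640"
  & tMaxHeight     ?~ "360"
  & tSizingPolicy  ?~ "ShrinkToFit"
  & tPaddingPolicy ?~ "Pad"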

TimeSpan

data TimeSpan Source #

Settings that determine when a clip begins and how long it lasts.

See: timeSpan smart constructor.

Instances

Eq TimeSpan Source # 
Data TimeSpan Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> TimeSpan -> c TimeSpan #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c TimeSpan #

toConstr :: TimeSpan -> Constr #

dataTypeOf :: TimeSpan -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c TimeSpan) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c TimeSpan) #

gmapT :: (forall b. Data b => b -> b) -> TimeSpan -> TimeSpan #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> TimeSpan -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> TimeSpan -> r #

gmapQ :: (forall d. Data d => d -> u) -> TimeSpan -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> TimeSpan -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> TimeSpan -> m TimeSpan #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> TimeSpan -> m TimeSpan #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> TimeSpan -> m TimeSpan #

Read TimeSpan Source # 
Show TimeSpan Source # 
Generic TimeSpan Source # 

Associated Types

type Rep TimeSpan :: * -> * #

Methods

from :: TimeSpan -> Rep TimeSpan x #

to :: Rep TimeSpan x -> TimeSpan #

Hashable TimeSpan Source # 

Methods

hashWithSalt :: Int -> TimeSpan -> Int #

hash :: TimeSpan -> Int #

ToJSON TimeSpan Source # 
FromJSON TimeSpan Source # 
NFData TimeSpan Source # 

Methods

rnf :: TimeSpan -> () #

type Rep TimeSpan Source # 
type Rep TimeSpan = D1 * (MetaData "TimeSpan" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "TimeSpan'" PrefixI True) ((:*:) * (S1 * (MetaSel (Just Symbol "_tsStartTime") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_tsDuration") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text)))))

timeSpan :: TimeSpan Source #

Creates a value of TimeSpan with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • tsStartTime - The place in the input file where you want a clip to start. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, Elastic Transcoder starts at the beginning of the input file.
  • tsDuration - The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, Elastic Transcoder creates an output file from StartTime to the end of the file. If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes the file and returns a warning message.

tsStartTime :: Lens' TimeSpan (Maybe Text) Source #

The place in the input file where you want a clip to start. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, Elastic Transcoder starts at the beginning of the input file.

tsDuration :: Lens' TimeSpan (Maybe Text) Source #

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, Elastic Transcoder creates an output file from StartTime to the end of the file. If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes the file and returns a warning message.
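
A minimal sketch of building a TimeSpan with the timeSpan smart constructor and the lens operators from Control.Lens (the timestamps are illustrative only):

{-# LANGUAGE OverloadedStrings #-}
import Control.Lens ((&), (?~))
import Network.AWS.ElasticTranscoder.Types

-- Clip 90 seconds of output starting 10 seconds into the input file.
clipSpan :: TimeSpan
clipSpan = timeSpan
  & tsStartTime ?~ "00:00:10.000"
  & tsDuration  ?~ "00:01:30.000"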

Timing

data Timing Source #

Details about the timing of a job.

See: timing smart constructor.

Instances

Eq Timing Source # 

Methods

(==) :: Timing -> Timing -> Bool #

(/=) :: Timing -> Timing -> Bool #

Data Timing Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> Timing -> c Timing #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c Timing #

toConstr :: Timing -> Constr #

dataTypeOf :: Timing -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c Timing) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c Timing) #

gmapT :: (forall b. Data b => b -> b) -> Timing -> Timing #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> Timing -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> Timing -> r #

gmapQ :: (forall d. Data d => d -> u) -> Timing -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> Timing -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> Timing -> m Timing #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> Timing -> m Timing #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> Timing -> m Timing #

Read Timing Source # 
Show Timing Source # 
Generic Timing Source # 

Associated Types

type Rep Timing :: * -> * #

Methods

from :: Timing -> Rep Timing x #

to :: Rep Timing x -> Timing #

Hashable Timing Source # 

Methods

hashWithSalt :: Int -> Timing -> Int #

hash :: Timing -> Int #

FromJSON Timing Source # 
NFData Timing Source # 

Methods

rnf :: Timing -> () #

type Rep Timing Source # 
type Rep Timing = D1 * (MetaData "Timing" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "Timing'" PrefixI True) ((:*:) * (S1 * (MetaSel (Just Symbol "_tSubmitTimeMillis") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Integer))) ((:*:) * (S1 * (MetaSel (Just Symbol "_tFinishTimeMillis") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Integer))) (S1 * (MetaSel (Just Symbol "_tStartTimeMillis") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Integer))))))

timing :: Timing Source #

Creates a value of Timing with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • tSubmitTimeMillis - The time the job was submitted to Elastic Transcoder, in epoch milliseconds.
  • tFinishTimeMillis - The time the job finished transcoding, in epoch milliseconds.
  • tStartTimeMillis - The time the job began transcoding, in epoch milliseconds.

tSubmitTimeMillis :: Lens' Timing (Maybe Integer) Source #

The time the job was submitted to Elastic Transcoder, in epoch milliseconds.

tFinishTimeMillis :: Lens' Timing (Maybe Integer) Source #

The time the job finished transcoding, in epoch milliseconds.

tStartTimeMillis :: Lens' Timing (Maybe Integer) Source #

The time the job began transcoding, in epoch milliseconds.
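
Timing values are typically read from job responses rather than constructed by callers. As a small sketch using the lenses above, the elapsed transcoding time can be computed when both timestamps are present:

import Control.Lens ((^.))
import Network.AWS.ElasticTranscoder.Types

-- Milliseconds spent transcoding, when both timestamps are present.
transcodeMillis :: Timing -> Maybe Integer
transcodeMillis t =
  (-) <$> (t ^. tFinishTimeMillis) <*> (t ^. tStartTimeMillis)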

VideoParameters

data VideoParameters Source #

The VideoParameters structure.

See: videoParameters smart constructor.

Instances

Eq VideoParameters Source # 
Data VideoParameters Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> VideoParameters -> c VideoParameters #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c VideoParameters #

toConstr :: VideoParameters -> Constr #

dataTypeOf :: VideoParameters -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c VideoParameters) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c VideoParameters) #

gmapT :: (forall b. Data b => b -> b) -> VideoParameters -> VideoParameters #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> VideoParameters -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> VideoParameters -> r #

gmapQ :: (forall d. Data d => d -> u) -> VideoParameters -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> VideoParameters -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> VideoParameters -> m VideoParameters #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> VideoParameters -> m VideoParameters #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> VideoParameters -> m VideoParameters #

Read VideoParameters Source # 
Show VideoParameters Source # 
Generic VideoParameters Source # 
Hashable VideoParameters Source # 
ToJSON VideoParameters Source # 
FromJSON VideoParameters Source # 
NFData VideoParameters Source # 

Methods

rnf :: VideoParameters -> () #

type Rep VideoParameters Source # 
type Rep VideoParameters = D1 * (MetaData "VideoParameters" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "VideoParameters'" PrefixI True) ((:*:) * ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_vpKeyframesMaxDist") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) ((:*:) * (S1 * (MetaSel (Just Symbol "_vpFrameRate") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_vpSizingPolicy") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))))) ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_vpMaxFrameRate") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_vpMaxHeight") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text)))) ((:*:) * (S1 * (MetaSel (Just Symbol "_vpWatermarks") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe [PresetWatermark]))) (S1 * (MetaSel (Just Symbol "_vpDisplayAspectRatio") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text)))))) ((:*:) * ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_vpResolution") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_vpCodec") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text)))) ((:*:) * (S1 * (MetaSel (Just Symbol "_vpAspectRatio") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_vpPaddingPolicy") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))))) ((:*:) * ((:*:) * (S1 * (MetaSel (Just Symbol "_vpMaxWidth") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_vpBitRate") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text)))) ((:*:) * (S1 * (MetaSel (Just Symbol "_vpFixedGOP") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_vpCodecOptions") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe (Map Text Text)))))))))

videoParameters :: VideoParameters Source #

Creates a value of VideoParameters with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • vpKeyframesMaxDist - Applicable only when the value of Video:Codec is one of H.264 , MPEG2 , or VP8 . The maximum number of frames between key frames. Key frames are fully encoded frames; the frames between key frames are encoded based, in part, on the content of the key frames. The value is an integer formatted as a string; valid values are between 1 (every frame is a key frame) and 100000, inclusive. A higher value results in higher compression but may also discernibly decrease video quality. For Smooth outputs, the FrameRate must have a constant ratio to the KeyframesMaxDist . This allows Smooth playlists to switch between different quality levels while the file is being played. For example, an input file can have a FrameRate of 30 with a KeyframesMaxDist of 90. The output file then needs to have a ratio of 1:3. Valid outputs would have FrameRate of 30, 25, and 10, and KeyframesMaxDist of 90, 75, and 30, respectively. Alternately, this can be achieved by setting FrameRate to auto and having the same values for MaxFrameRate and KeyframesMaxDist .
  • vpFrameRate - The frames per second for the video stream in the output file. Valid values include: auto , 10 , 15 , 23.97 , 24 , 25 , 29.97 , 30 , 60 . If you specify auto , Elastic Transcoder uses the detected frame rate of the input source. If you specify a frame rate, we recommend that you perform the following calculation: Frame rate = maximum recommended decoding speed in luma samples/second / (width in pixels * height in pixels), where: * width in pixels and height in pixels represent the Resolution of the output video. * maximum recommended decoding speed in Luma samples/second is less than or equal to the maximum value listed in the following list, based on the value that you specified for Level. The maximum recommended decoding speed in Luma samples/second for each level is described in the following list (Level - Decoding speed in Luma samples/second): * 1 - 380160 * 1b - 380160 * 1.1 - 76800 * 1.2 - 1536000 * 1.3 - 3041280 * 2 - 3041280 * 2.1 - 5068800 * 2.2 - 5184000 * 3 - 10368000 * 3.1 - 27648000 * 3.2 - 55296000 * 4 - 62914560 * 4.1 - 62914560
  • vpSizingPolicy - Specify one of the following values to control scaling of the output video: * Fit : Elastic Transcoder scales the output video so it matches the value that you specified in either MaxWidth or MaxHeight without exceeding the other value. * Fill : Elastic Transcoder scales the output video so it matches the value that you specified in either MaxWidth or MaxHeight and matches or exceeds the other value. Elastic Transcoder centers the output video and then crops it in the dimension (if any) that exceeds the maximum value. * Stretch : Elastic Transcoder stretches the output video to match the values that you specified for MaxWidth and MaxHeight . If the relative proportions of the input video and the output video are different, the output video will be distorted. * Keep : Elastic Transcoder does not scale the output video. If either dimension of the input video exceeds the values that you specified for MaxWidth and MaxHeight , Elastic Transcoder crops the output video. * ShrinkToFit : Elastic Transcoder scales the output video down so that its dimensions match the values that you specified for at least one of MaxWidth and MaxHeight without exceeding either value. If you specify this option, Elastic Transcoder does not scale the video up. * ShrinkToFill : Elastic Transcoder scales the output video down so that its dimensions match the values that you specified for at least one of MaxWidth and MaxHeight without dropping below either value. If you specify this option, Elastic Transcoder does not scale the video up.
  • vpMaxFrameRate - If you specify auto for FrameRate , Elastic Transcoder uses the frame rate of the input video for the frame rate of the output video. Specify the maximum frame rate that you want Elastic Transcoder to use when the frame rate of the input video is greater than the desired maximum frame rate of the output video. Valid values include: 10 , 15 , 23.97 , 24 , 25 , 29.97 , 30 , 60 .
  • vpMaxHeight - The maximum height of the output video in pixels. If you specify auto , Elastic Transcoder uses 1080 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 96 and 3072.
  • vpWatermarks - Settings for the size, location, and opacity of graphics that you want Elastic Transcoder to overlay over videos that are transcoded using this preset. You can specify settings for up to four watermarks. Watermarks appear in the specified size and location, and with the specified opacity for the duration of the transcoded video. Watermarks can be in .png or .jpg format. If you want to display a watermark that is not rectangular, use the .png format, which supports transparency. When you create a job that uses this preset, you specify the .png or .jpg graphics that you want Elastic Transcoder to include in the transcoded videos. You can specify fewer graphics in the job than you specify watermark settings in the preset, which allows you to use the same preset for up to four watermarks that have different dimensions.
  • vpDisplayAspectRatio - The value that Elastic Transcoder adds to the metadata in the output file.
  • vpResolution - Important: To better control resolution and aspect ratio of output videos, we recommend that you use the values MaxWidth , MaxHeight , SizingPolicy , PaddingPolicy , and DisplayAspectRatio instead of Resolution and AspectRatio . The two groups of settings are mutually exclusive. Do not use them together. The width and height of the video in the output file, in pixels. Valid values are auto and width x height : * auto : Elastic Transcoder attempts to preserve the width and height of the input file, subject to the following rules. * width x height : The width and height of the output video in pixels. Note the following about specifying the width and height: * The width must be an even integer between 128 and 4096, inclusive. * The height must be an even integer between 96 and 3072, inclusive. * If you specify a resolution that is less than the resolution of the input file, Elastic Transcoder rescales the output file to the lower resolution. * If you specify a resolution that is greater than the resolution of the input file, Elastic Transcoder rescales the output to the higher resolution. * We recommend that you specify a resolution for which the product of width and height is less than or equal to the applicable value in the following list (Level - Max width x height value): * 1 - 25344 * 1b - 25344 * 1.1 - 101376 * 1.2 - 101376 * 1.3 - 101376 * 2 - 101376 * 2.1 - 202752 * 2.2 - 404720 * 3 - 404720 * 3.1 - 921600 * 3.2 - 1310720 * 4 - 2097152 * 4.1 - 2097152
  • vpCodec - The video codec for the output file. Valid values include gif , H.264 , mpeg2 , vp8 , and vp9 . You can only specify vp8 and vp9 when the container type is webm , gif when the container type is gif , and mpeg2 when the container type is mpg .
  • vpAspectRatio - Important: To better control resolution and aspect ratio of output videos, we recommend that you use the values MaxWidth , MaxHeight , SizingPolicy , PaddingPolicy , and DisplayAspectRatio instead of Resolution and AspectRatio . The two groups of settings are mutually exclusive. Do not use them together. The display aspect ratio of the video in the output file. Valid values include: auto , 1:1 , 4:3 , 3:2 , 16:9 . If you specify auto , Elastic Transcoder tries to preserve the aspect ratio of the input file. If you specify an aspect ratio for the output file that differs from the aspect ratio of the input file, Elastic Transcoder adds pillarboxing (black bars on the sides) or letterboxing (black bars on the top and bottom) to maintain the aspect ratio of the active region of the video.
  • vpPaddingPolicy - When you set PaddingPolicy to Pad , Elastic Transcoder may add black bars to the top and bottom and/or left and right sides of the output video to make the total size of the output video match the values that you specified for MaxWidth and MaxHeight .
  • vpMaxWidth - The maximum width of the output video in pixels. If you specify auto , Elastic Transcoder uses 1920 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 128 and 4096.
  • vpBitRate - The bit rate of the video stream in the output file, in kilobits/second. Valid values depend on the values of Level and Profile . If you specify auto , Elastic Transcoder uses the detected bit rate of the input source. If you specify a value other than auto , we recommend that you specify a value less than or equal to the maximum H.264-compliant value listed for your level and profile (Level - Maximum video bit rate in kilobits/second (baseline and main Profile) : maximum video bit rate in kilobits/second (high Profile)): * 1 - 64 : 80 * 1b - 128 : 160 * 1.1 - 192 : 240 * 1.2 - 384 : 480 * 1.3 - 768 : 960 * 2 - 2000 : 2500 * 3 - 10000 : 12500 * 3.1 - 14000 : 17500 * 3.2 - 20000 : 25000 * 4 - 20000 : 25000 * 4.1 - 50000 : 62500
  • vpFixedGOP - Applicable only when the value of Video:Codec is one of H.264 , MPEG2 , or VP8 . Whether to use a fixed value for FixedGOP . Valid values are true and false : * true : Elastic Transcoder uses the value of KeyframesMaxDist for the distance between key frames (the number of frames in a group of pictures, or GOP). * false : The distance between key frames can vary. Important: FixedGOP must be set to true for fmp4 containers.
  • vpCodecOptions - Profile (H.264/VP8/VP9 Only) The H.264 profile that you want to use for the output file. Elastic Transcoder supports the following profiles: * baseline : The profile most commonly used for videoconferencing and for mobile applications. * main : The profile used for standard-definition digital TV broadcasts. * high : The profile used for high-definition digital TV broadcasts and for Blu-ray discs. Level (H.264 Only) The H.264 level that you want to use for the output file. Elastic Transcoder supports the following levels: 1 , 1b , 1.1 , 1.2 , 1.3 , 2 , 2.1 , 2.2 , 3 , 3.1 , 3.2 , 4 , 4.1 MaxReferenceFrames (H.264 Only) Applicable only when the value of Video:Codec is H.264. The maximum number of previously decoded frames to use as a reference for decoding future frames. Valid values are integers 0 through 16, but we recommend that you not use a value greater than the following: Min(Floor(Maximum decoded picture buffer in macroblocks * 256 / (Width in pixels * Height in pixels)), 16) where Width in pixels and Height in pixels represent either MaxWidth and MaxHeight, or Resolution. Maximum decoded picture buffer in macroblocks depends on the value of the Level object. See the list below. (A macroblock is a block of pixels measuring 16x16.) * 1 - 396 * 1b - 396 * 1.1 - 900 * 1.2 - 2376 * 1.3 - 2376 * 2 - 2376 * 2.1 - 4752 * 2.2 - 8100 * 3 - 8100 * 3.1 - 18000 * 3.2 - 20480 * 4 - 32768 * 4.1 - 32768 MaxBitRate (Optional, H.264/MPEG2/VP8/VP9 only) The maximum number of bits per second in a video buffer; the size of the buffer is specified by BufferSize . Specify a value between 16 and 62,500. You can reduce the bandwidth required to stream a video by reducing the maximum bit rate, but this also reduces the quality of the video. BufferSize (Optional, H.264/MPEG2/VP8/VP9 only) The maximum number of bits in any x seconds of the output video. This window is commonly 10 seconds, the standard segment duration when you're using FMP4 or MPEG-TS for the container type of the output video. Specify an integer greater than 0. If you specify MaxBitRate and omit BufferSize , Elastic Transcoder sets BufferSize to 10 times the value of MaxBitRate . InterlacedMode (Optional, H.264/MPEG2 Only) The interlace mode for the output video. Interlaced video is used to double the perceived frame rate for a video by interlacing two fields (one field on every other line, the other field on the other lines) so that the human eye registers multiple pictures per frame. Interlacing reduces the bandwidth required for transmitting a video, but can result in blurred images and flickering. Valid values include Progressive (no interlacing, top to bottom), TopFirst (top field first), BottomFirst (bottom field first), and Auto . If InterlaceMode is not specified, Elastic Transcoder uses Progressive for the output. If Auto is specified, Elastic Transcoder interlaces the output. ColorSpaceConversionMode (Optional, H.264/MPEG2 Only) The color space conversion Elastic Transcoder applies to the output video. Color spaces are the algorithms used by the computer to store information about how to render color. Bt.601 is the standard for standard definition video, while Bt.709 is the standard for high definition video. Valid values include None , Bt709toBt601 , Bt601toBt709 , and Auto . 
If you chose Auto for ColorSpaceConversionMode and your output is interlaced, your frame rate is one of 23.97 , 24 , 25 , 29.97 , 50 , or 60 , your SegmentDuration is null, and you are using one of the resolution changes from the list below, Elastic Transcoder applies the following color space conversions: * Standard to HD, 720x480 to 1920x1080 - Elastic Transcoder applies Bt601ToBt709 * Standard to HD, 720x576 to 1920x1080 - Elastic Transcoder applies Bt601ToBt709 * HD to Standard, 1920x1080 to 720x480 - Elastic Transcoder applies Bt709ToBt601 * HD to Standard, 1920x1080 to 720x576 - Elastic Transcoder applies Bt709ToBt601 If you do not specify a ColorSpaceConversionMode , Elastic Transcoder does not change the color space of a file. If you are unsure what ColorSpaceConversionMode was applied to your output file, you can check the AppliedColorSpaceConversion parameter included in your job response. If your job does not have an AppliedColorSpaceConversion in its response, no ColorSpaceConversionMode was applied. ChromaSubsampling The sampling pattern for the chroma (color) channels of the output video. Valid values include yuv420p and yuv422p . yuv420p samples the chroma information of every other horizontal and every other vertical line, yuv422p samples the color information of every horizontal line and every other vertical line. LoopCount (Gif Only) The number of times you want the output gif to loop. Valid values include Infinite and integers between 0 and 100 , inclusive.

vpKeyframesMaxDist :: Lens' VideoParameters (Maybe Text) Source #

Applicable only when the value of Video:Codec is one of H.264 , MPEG2 , or VP8 . The maximum number of frames between key frames. Key frames are fully encoded frames; the frames between key frames are encoded based, in part, on the content of the key frames. The value is an integer formatted as a string; valid values are between 1 (every frame is a key frame) and 100000, inclusive. A higher value results in higher compression but may also discernibly decrease video quality. For Smooth outputs, the FrameRate must have a constant ratio to the KeyframesMaxDist . This allows Smooth playlists to switch between different quality levels while the file is being played. For example, an input file can have a FrameRate of 30 with a KeyframesMaxDist of 90. The output file then needs to have a ratio of 1:3. Valid outputs would have FrameRate of 30, 25, and 10, and KeyframesMaxDist of 90, 75, and 30, respectively. Alternately, this can be achieved by setting FrameRate to auto and having the same values for MaxFrameRate and KeyframesMaxDist .

vpFrameRate :: Lens' VideoParameters (Maybe Text) Source #

The frames per second for the video stream in the output file. Valid values include: auto , 10 , 15 , 23.97 , 24 , 25 , 29.97 , 30 , 60 . If you specify auto , Elastic Transcoder uses the detected frame rate of the input source. If you specify a frame rate, we recommend that you perform the following calculation: Frame rate = maximum recommended decoding speed in luma samples/second / (width in pixels * height in pixels), where: * width in pixels and height in pixels represent the Resolution of the output video. * maximum recommended decoding speed in Luma samples/second is less than or equal to the maximum value listed in the following list, based on the value that you specified for Level. The maximum recommended decoding speed in Luma samples/second for each level is described in the following list (Level - Decoding speed in Luma samples/second): * 1 - 380160 * 1b - 380160 * 1.1 - 76800 * 1.2 - 1536000 * 1.3 - 3041280 * 2 - 3041280 * 2.1 - 5068800 * 2.2 - 5184000 * 3 - 10368000 * 3.1 - 27648000 * 3.2 - 55296000 * 4 - 62914560 * 4.1 - 62914560
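
As a worked example of the calculation above (a sketch only; the decoding speed is taken from the level 3.1 entry in the list, and a 1280x720 output resolution is assumed):

-- Frame rate = maximum recommended decoding speed in luma samples/second
--              / (width in pixels * height in pixels)
recommendedFrameRate :: Double -> Double -> Double -> Double
recommendedFrameRate decodingSpeed width height = decodingSpeed / (width * height)

-- recommendedFrameRate 27648000 1280 720 == 30.0,
-- so a FrameRate of "30" stays within the level 3.1 budget for a 1280x720 output.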

vpSizingPolicy :: Lens' VideoParameters (Maybe Text) Source #

Specify one of the following values to control scaling of the output video: * Fit : Elastic Transcoder scales the output video so it matches the value that you specified in either MaxWidth or MaxHeight without exceeding the other value. * Fill : Elastic Transcoder scales the output video so it matches the value that you specified in either MaxWidth or MaxHeight and matches or exceeds the other value. Elastic Transcoder centers the output video and then crops it in the dimension (if any) that exceeds the maximum value. * Stretch : Elastic Transcoder stretches the output video to match the values that you specified for MaxWidth and MaxHeight . If the relative proportions of the input video and the output video are different, the output video will be distorted. * Keep : Elastic Transcoder does not scale the output video. If either dimension of the input video exceeds the values that you specified for MaxWidth and MaxHeight , Elastic Transcoder crops the output video. * ShrinkToFit : Elastic Transcoder scales the output video down so that its dimensions match the values that you specified for at least one of MaxWidth and MaxHeight without exceeding either value. If you specify this option, Elastic Transcoder does not scale the video up. * ShrinkToFill : Elastic Transcoder scales the output video down so that its dimensions match the values that you specified for at least one of MaxWidth and MaxHeight without dropping below either value. If you specify this option, Elastic Transcoder does not scale the video up.

vpMaxFrameRate :: Lens' VideoParameters (Maybe Text) Source #

If you specify auto for FrameRate , Elastic Transcoder uses the frame rate of the input video for the frame rate of the output video. Specify the maximum frame rate that you want Elastic Transcoder to use when the frame rate of the input video is greater than the desired maximum frame rate of the output video. Valid values include: 10 , 15 , 23.97 , 24 , 25 , 29.97 , 30 , 60 .

vpMaxHeight :: Lens' VideoParameters (Maybe Text) Source #

The maximum height of the output video in pixels. If you specify auto , Elastic Transcoder uses 1080 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 96 and 3072.

vpWatermarks :: Lens' VideoParameters [PresetWatermark] Source #

Settings for the size, location, and opacity of graphics that you want Elastic Transcoder to overlay over videos that are transcoded using this preset. You can specify settings for up to four watermarks. Watermarks appear in the specified size and location, and with the specified opacity for the duration of the transcoded video. Watermarks can be in .png or .jpg format. If you want to display a watermark that is not rectangular, use the .png format, which supports transparency. When you create a job that uses this preset, you specify the .png or .jpg graphics that you want Elastic Transcoder to include in the transcoded videos. You can specify fewer graphics in the job than you specify watermark settings in the preset, which allows you to use the same preset for up to four watermarks that have different dimensions.

vpDisplayAspectRatio :: Lens' VideoParameters (Maybe Text) Source #

The value that Elastic Transcoder adds to the metadata in the output file.

vpResolution :: Lens' VideoParameters (Maybe Text) Source #

Important: To better control resolution and aspect ratio of output videos, we recommend that you use the values MaxWidth , MaxHeight , SizingPolicy , PaddingPolicy , and DisplayAspectRatio instead of Resolution and AspectRatio . The two groups of settings are mutually exclusive. Do not use them together. The width and height of the video in the output file, in pixels. Valid values are auto and width x height : * auto : Elastic Transcoder attempts to preserve the width and height of the input file, subject to the following rules. * width x height : The width and height of the output video in pixels. Note the following about specifying the width and height: * The width must be an even integer between 128 and 4096, inclusive. * The height must be an even integer between 96 and 3072, inclusive. * If you specify a resolution that is less than the resolution of the input file, Elastic Transcoder rescales the output file to the lower resolution. * If you specify a resolution that is greater than the resolution of the input file, Elastic Transcoder rescales the output to the higher resolution. * We recommend that you specify a resolution for which the product of width and height is less than or equal to the applicable value in the following list (Level - Max width x height value): * 1 - 25344 * 1b - 25344 * 1.1 - 101376 * 1.2 - 101376 * 1.3 - 101376 * 2 - 101376 * 2.1 - 202752 * 2.2 - 404720 * 3 - 404720 * 3.1 - 921600 * 3.2 - 1310720 * 4 - 2097152 * 4.1 - 2097152

vpCodec :: Lens' VideoParameters (Maybe Text) Source #

The video codec for the output file. Valid values include gif , H.264 , mpeg2 , vp8 , and vp9 . You can only specify vp8 and vp9 when the container type is webm , gif when the container type is gif , and mpeg2 when the container type is mpg .

vpAspectRatio :: Lens' VideoParameters (Maybe Text) Source #

Important: To better control resolution and aspect ratio of output videos, we recommend that you use the values MaxWidth , MaxHeight , SizingPolicy , PaddingPolicy , and DisplayAspectRatio instead of Resolution and AspectRatio . The two groups of settings are mutually exclusive. Do not use them together. The display aspect ratio of the video in the output file. Valid values include: auto , 1:1 , 4:3 , 3:2 , 16:9 . If you specify auto , Elastic Transcoder tries to preserve the aspect ratio of the input file. If you specify an aspect ratio for the output file that differs from the aspect ratio of the input file, Elastic Transcoder adds pillarboxing (black bars on the sides) or letterboxing (black bars on the top and bottom) to maintain the aspect ratio of the active region of the video.

vpPaddingPolicy :: Lens' VideoParameters (Maybe Text) Source #

When you set PaddingPolicy to Pad , Elastic Transcoder may add black bars to the top and bottom and/or left and right sides of the output video to make the total size of the output video match the values that you specified for MaxWidth and MaxHeight .

vpMaxWidth :: Lens' VideoParameters (Maybe Text) Source #

The maximum width of the output video in pixels. If you specify auto , Elastic Transcoder uses 1920 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 128 and 4096.

vpBitRate :: Lens' VideoParameters (Maybe Text) Source #

The bit rate of the video stream in the output file, in kilobits/second. Valid values depend on the values of Level and Profile . If you specify auto , Elastic Transcoder uses the detected bit rate of the input source. If you specify a value other than auto , we recommend that you specify a value less than or equal to the maximum H.264-compliant value listed for your level and profile (Level - Maximum video bit rate in kilobits/second (baseline and main Profile) : maximum video bit rate in kilobits/second (high Profile)): * 1 - 64 : 80 * 1b - 128 : 160 * 1.1 - 192 : 240 * 1.2 - 384 : 480 * 1.3 - 768 : 960 * 2 - 2000 : 2500 * 3 - 10000 : 12500 * 3.1 - 14000 : 17500 * 3.2 - 20000 : 25000 * 4 - 20000 : 25000 * 4.1 - 50000 : 62500

vpFixedGOP :: Lens' VideoParameters (Maybe Text) Source #

Applicable only when the value of Video:Codec is one of H.264 , MPEG2 , or VP8 . Whether to use a fixed value for FixedGOP . Valid values are true and false : * true : Elastic Transcoder uses the value of KeyframesMaxDist for the distance between key frames (the number of frames in a group of pictures, or GOP). * false : The distance between key frames can vary. Important: FixedGOP must be set to true for fmp4 containers.

vpCodecOptions :: Lens' VideoParameters (HashMap Text Text) Source #

Profile (H.264/VP8/VP9 Only) The H.264 profile that you want to use for the output file. Elastic Transcoder supports the following profiles: * baseline : The profile most commonly used for videoconferencing and for mobile applications. * main : The profile used for standard-definition digital TV broadcasts. * high : The profile used for high-definition digital TV broadcasts and for Blu-ray discs. Level (H.264 Only) The H.264 level that you want to use for the output file. Elastic Transcoder supports the following levels: 1 , 1b , 1.1 , 1.2 , 1.3 , 2 , 2.1 , 2.2 , 3 , 3.1 , 3.2 , 4 , 4.1 MaxReferenceFrames (H.264 Only) Applicable only when the value of Video:Codec is H.264. The maximum number of previously decoded frames to use as a reference for decoding future frames. Valid values are integers 0 through 16, but we recommend that you not use a value greater than the following: Min(Floor(Maximum decoded picture buffer in macroblocks * 256 / (Width in pixels * Height in pixels)), 16) where Width in pixels and Height in pixels represent either MaxWidth and MaxHeight, or Resolution. Maximum decoded picture buffer in macroblocks depends on the value of the Level object. See the list below. (A macroblock is a block of pixels measuring 16x16.) * 1 - 396 * 1b - 396 * 1.1 - 900 * 1.2 - 2376 * 1.3 - 2376 * 2 - 2376 * 2.1 - 4752 * 2.2 - 8100 * 3 - 8100 * 3.1 - 18000 * 3.2 - 20480 * 4 - 32768 * 4.1 - 32768 MaxBitRate (Optional, H.264/MPEG2/VP8/VP9 only) The maximum number of bits per second in a video buffer; the size of the buffer is specified by BufferSize . Specify a value between 16 and 62,500. You can reduce the bandwidth required to stream a video by reducing the maximum bit rate, but this also reduces the quality of the video. BufferSize (Optional, H.264/MPEG2/VP8/VP9 only) The maximum number of bits in any x seconds of the output video. This window is commonly 10 seconds, the standard segment duration when you're using FMP4 or MPEG-TS for the container type of the output video. Specify an integer greater than 0. If you specify MaxBitRate and omit BufferSize , Elastic Transcoder sets BufferSize to 10 times the value of MaxBitRate . InterlacedMode (Optional, H.264/MPEG2 Only) The interlace mode for the output video. Interlaced video is used to double the perceived frame rate for a video by interlacing two fields (one field on every other line, the other field on the other lines) so that the human eye registers multiple pictures per frame. Interlacing reduces the bandwidth required for transmitting a video, but can result in blurred images and flickering. Valid values include Progressive (no interlacing, top to bottom), TopFirst (top field first), BottomFirst (bottom field first), and Auto . If InterlaceMode is not specified, Elastic Transcoder uses Progressive for the output. If Auto is specified, Elastic Transcoder interlaces the output. ColorSpaceConversionMode (Optional, H.264/MPEG2 Only) The color space conversion Elastic Transcoder applies to the output video. Color spaces are the algorithms used by the computer to store information about how to render color. Bt.601 is the standard for standard definition video, while Bt.709 is the standard for high definition video. Valid values include None , Bt709toBt601 , Bt601toBt709 , and Auto . 
If you chose Auto for ColorSpaceConversionMode and your output is interlaced, your frame rate is one of 23.97 , 24 , 25 , 29.97 , 50 , or 60 , your SegmentDuration is null, and you are using one of the resolution changes from the list below, Elastic Transcoder applies the following color space conversions: * Standard to HD, 720x480 to 1920x1080 - Elastic Transcoder applies Bt601ToBt709 * Standard to HD, 720x576 to 1920x1080 - Elastic Transcoder applies Bt601ToBt709 * HD to Standard, 1920x1080 to 720x480 - Elastic Transcoder applies Bt709ToBt601 * HD to Standard, 1920x1080 to 720x576 - Elastic Transcoder applies Bt709ToBt601 If you do not specify a ColorSpaceConversionMode , Elastic Transcoder does not change the color space of a file. If you are unsure what ColorSpaceConversionMode was applied to your output file, you can check the AppliedColorSpaceConversion parameter included in your job response. If your job does not have an AppliedColorSpaceConversion in its response, no ColorSpaceConversionMode was applied. ChromaSubsampling The sampling pattern for the chroma (color) channels of the output video. Valid values include yuv420p and yuv422p . yuv420p samples the chroma information of every other horizontal and every other vertical line, yuv422p samples the color information of every horizontal line and every other vertical line. LoopCount (Gif Only) The number of times you want the output gif to loop. Valid values include Infinite and integers between 0 and 100 , inclusive.
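
Putting the lenses above together, a minimal sketch of a 720p H.264 video section built with the videoParameters smart constructor; every value shown (resolution, bit rate, GOP settings, codec options) is illustrative only, and vpCodecOptions takes a plain HashMap as documented above:

{-# LANGUAGE OverloadedStrings #-}
import Control.Lens ((&), (.~), (?~))
import qualified Data.HashMap.Strict as HashMap
import Network.AWS.ElasticTranscoder.Types

-- 720p H.264 output, padded to fill the frame, with a key frame every 90 frames.
hd720Video :: VideoParameters
hd720Video = videoParameters
  & vpCodec              ?~ "H.264"
  & vpMaxWidth           ?~ "1280"
  & vpMaxHeight          ?~ "720"
  & vpSizingPolicy       ?~ "ShrinkToFit"
  & vpPaddingPolicy      ?~ "Pad"
  & vpDisplayAspectRatio ?~ "auto"
  & vpFrameRate          ?~ "30"
  & vpKeyframesMaxDist   ?~ "90"
  & vpFixedGOP           ?~ "false"
  & vpBitRate            ?~ "2200"
  & vpCodecOptions       .~ HashMap.fromList
      [ ("Profile", "main")
      , ("Level", "3.1")
      , ("MaxReferenceFrames", "3")
      ]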

Warning

data Warning Source #

Elastic Transcoder returns a warning if the resources used by your pipeline are not in the same region as the pipeline.

Using resources in the same region, such as your Amazon S3 buckets, Amazon SNS notification topics, and AWS KMS key, reduces processing time and prevents cross-regional charges.

See: warning smart constructor.

Instances

Eq Warning Source # 

Methods

(==) :: Warning -> Warning -> Bool #

(/=) :: Warning -> Warning -> Bool #

Data Warning Source # 

Methods

gfoldl :: (forall d b. Data d => c (d -> b) -> d -> c b) -> (forall g. g -> c g) -> Warning -> c Warning #

gunfold :: (forall b r. Data b => c (b -> r) -> c r) -> (forall r. r -> c r) -> Constr -> c Warning #

toConstr :: Warning -> Constr #

dataTypeOf :: Warning -> DataType #

dataCast1 :: Typeable (* -> *) t => (forall d. Data d => c (t d)) -> Maybe (c Warning) #

dataCast2 :: Typeable (* -> * -> *) t => (forall d e. (Data d, Data e) => c (t d e)) -> Maybe (c Warning) #

gmapT :: (forall b. Data b => b -> b) -> Warning -> Warning #

gmapQl :: (r -> r' -> r) -> r -> (forall d. Data d => d -> r') -> Warning -> r #

gmapQr :: (r' -> r -> r) -> r -> (forall d. Data d => d -> r') -> Warning -> r #

gmapQ :: (forall d. Data d => d -> u) -> Warning -> [u] #

gmapQi :: Int -> (forall d. Data d => d -> u) -> Warning -> u #

gmapM :: Monad m => (forall d. Data d => d -> m d) -> Warning -> m Warning #

gmapMp :: MonadPlus m => (forall d. Data d => d -> m d) -> Warning -> m Warning #

gmapMo :: MonadPlus m => (forall d. Data d => d -> m d) -> Warning -> m Warning #

Read Warning Source # 
Show Warning Source # 
Generic Warning Source # 

Associated Types

type Rep Warning :: * -> * #

Methods

from :: Warning -> Rep Warning x #

to :: Rep Warning x -> Warning #

Hashable Warning Source # 

Methods

hashWithSalt :: Int -> Warning -> Int #

hash :: Warning -> Int #

FromJSON Warning Source # 
NFData Warning Source # 

Methods

rnf :: Warning -> () #

type Rep Warning Source # 
type Rep Warning = D1 * (MetaData "Warning" "Network.AWS.ElasticTranscoder.Types.Product" "amazonka-elastictranscoder-1.6.0-38CPPD4da2X1lqPFWRIqIv" False) (C1 * (MetaCons "Warning'" PrefixI True) ((:*:) * (S1 * (MetaSel (Just Symbol "_wCode") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text))) (S1 * (MetaSel (Just Symbol "_wMessage") NoSourceUnpackedness SourceStrict DecidedStrict) (Rec0 * (Maybe Text)))))

warning :: Warning Source #

Creates a value of Warning with the minimum fields required to make a request.

Use one of the following lenses to modify other fields as desired:

  • wCode - The code of the cross-regional warning.
  • wMessage - The message explaining what resources are in a different region from the pipeline.

wCode :: Lens' Warning (Maybe Text) Source #

The code of the cross-regional warning.

wMessage :: Lens' Warning (Maybe Text) Source #

The message explaining what resources are in a different region from the pipeline.
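
Warnings are read-only values returned alongside pipeline responses; as a minimal sketch of consuming one with the lenses above (the fallback strings are assumptions for missing fields):

{-# LANGUAGE OverloadedStrings #-}
import Control.Lens ((^.))
import Data.Maybe (fromMaybe)
import Data.Monoid ((<>))
import Data.Text (Text)
import Network.AWS.ElasticTranscoder.Types

-- Render a cross-regional warning as "code: message".
renderWarning :: Warning -> Text
renderWarning w =
  fromMaybe "unknown" (w ^. wCode) <> ": " <> fromMaybe "" (w ^. wMessage)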