| Copyright | (c) 2013-2018 Brendan Hay |
|---|---|
| License | Mozilla Public License, v. 2.0. |
| Maintainer | Brendan Hay <brendan.g.hay+amazonka@gmail.com> |
| Stability | auto-generated |
| Portability | non-portable (GHC extensions) |
| Safe Haskell | None |
| Language | Haskell2010 |
Network.AWS.MachineLearning
Contents
- Service Configuration
- Errors
- Waiters
- Operations
- UpdateDataSource
- DeleteDataSource
- DescribeTags
- CreateDataSourceFromRedshift
- CreateDataSourceFromS3
- CreateMLModel
- DeleteTags
- DeleteBatchPrediction
- UpdateBatchPrediction
- GetMLModel
- GetDataSource
- UpdateEvaluation
- DeleteEvaluation
- DeleteMLModel
- UpdateMLModel
- GetBatchPrediction
- DescribeBatchPredictions (Paginated)
- CreateDataSourceFromRDS
- CreateEvaluation
- Predict
- DeleteRealtimeEndpoint
- CreateBatchPrediction
- GetEvaluation
- DescribeEvaluations (Paginated)
- CreateRealtimeEndpoint
- AddTags
- DescribeMLModels (Paginated)
- DescribeDataSources (Paginated)
- Types
- Algorithm
- BatchPredictionFilterVariable
- DataSourceFilterVariable
- DetailsAttributes
- EntityStatus
- EvaluationFilterVariable
- MLModelFilterVariable
- MLModelType
- RealtimeEndpointStatus
- SortOrder
- TaggableResourceType
- BatchPrediction
- DataSource
- Evaluation
- MLModel
- PerformanceMetrics
- Prediction
- RDSDataSpec
- RDSDatabase
- RDSDatabaseCredentials
- RDSMetadata
- RealtimeEndpointInfo
- RedshiftDataSpec
- RedshiftDatabase
- RedshiftDatabaseCredentials
- RedshiftMetadata
- S3DataSpec
- Tag
Description
Definition of the public APIs exposed by Amazon Machine Learning
- machineLearning :: Service
- _InvalidTagException :: AsError a => Getting (First ServiceError) a ServiceError
- _InternalServerException :: AsError a => Getting (First ServiceError) a ServiceError
- _InvalidInputException :: AsError a => Getting (First ServiceError) a ServiceError
- _IdempotentParameterMismatchException :: AsError a => Getting (First ServiceError) a ServiceError
- _TagLimitExceededException :: AsError a => Getting (First ServiceError) a ServiceError
- _PredictorNotMountedException :: AsError a => Getting (First ServiceError) a ServiceError
- _ResourceNotFoundException :: AsError a => Getting (First ServiceError) a ServiceError
- _LimitExceededException :: AsError a => Getting (First ServiceError) a ServiceError
- mLModelAvailable :: Wait DescribeMLModels
- batchPredictionAvailable :: Wait DescribeBatchPredictions
- dataSourceAvailable :: Wait DescribeDataSources
- evaluationAvailable :: Wait DescribeEvaluations
- module Network.AWS.MachineLearning.UpdateDataSource
- module Network.AWS.MachineLearning.DeleteDataSource
- module Network.AWS.MachineLearning.DescribeTags
- module Network.AWS.MachineLearning.CreateDataSourceFromRedshift
- module Network.AWS.MachineLearning.CreateDataSourceFromS3
- module Network.AWS.MachineLearning.CreateMLModel
- module Network.AWS.MachineLearning.DeleteTags
- module Network.AWS.MachineLearning.DeleteBatchPrediction
- module Network.AWS.MachineLearning.UpdateBatchPrediction
- module Network.AWS.MachineLearning.GetMLModel
- module Network.AWS.MachineLearning.GetDataSource
- module Network.AWS.MachineLearning.UpdateEvaluation
- module Network.AWS.MachineLearning.DeleteEvaluation
- module Network.AWS.MachineLearning.DeleteMLModel
- module Network.AWS.MachineLearning.UpdateMLModel
- module Network.AWS.MachineLearning.GetBatchPrediction
- module Network.AWS.MachineLearning.DescribeBatchPredictions
- module Network.AWS.MachineLearning.CreateDataSourceFromRDS
- module Network.AWS.MachineLearning.CreateEvaluation
- module Network.AWS.MachineLearning.Predict
- module Network.AWS.MachineLearning.DeleteRealtimeEndpoint
- module Network.AWS.MachineLearning.CreateBatchPrediction
- module Network.AWS.MachineLearning.GetEvaluation
- module Network.AWS.MachineLearning.DescribeEvaluations
- module Network.AWS.MachineLearning.CreateRealtimeEndpoint
- module Network.AWS.MachineLearning.AddTags
- module Network.AWS.MachineLearning.DescribeMLModels
- module Network.AWS.MachineLearning.DescribeDataSources
- data Algorithm = SGD
- data BatchPredictionFilterVariable
- data DataSourceFilterVariable
- data DetailsAttributes
- data EntityStatus
- data EvaluationFilterVariable
- data MLModelFilterVariable
- data MLModelType
- data RealtimeEndpointStatus
- data SortOrder
- data TaggableResourceType
- data BatchPrediction
- batchPrediction :: BatchPrediction
- bpStatus :: Lens' BatchPrediction (Maybe EntityStatus)
- bpLastUpdatedAt :: Lens' BatchPrediction (Maybe UTCTime)
- bpCreatedAt :: Lens' BatchPrediction (Maybe UTCTime)
- bpComputeTime :: Lens' BatchPrediction (Maybe Integer)
- bpInputDataLocationS3 :: Lens' BatchPrediction (Maybe Text)
- bpMLModelId :: Lens' BatchPrediction (Maybe Text)
- bpBatchPredictionDataSourceId :: Lens' BatchPrediction (Maybe Text)
- bpTotalRecordCount :: Lens' BatchPrediction (Maybe Integer)
- bpStartedAt :: Lens' BatchPrediction (Maybe UTCTime)
- bpBatchPredictionId :: Lens' BatchPrediction (Maybe Text)
- bpFinishedAt :: Lens' BatchPrediction (Maybe UTCTime)
- bpInvalidRecordCount :: Lens' BatchPrediction (Maybe Integer)
- bpCreatedByIAMUser :: Lens' BatchPrediction (Maybe Text)
- bpName :: Lens' BatchPrediction (Maybe Text)
- bpMessage :: Lens' BatchPrediction (Maybe Text)
- bpOutputURI :: Lens' BatchPrediction (Maybe Text)
- data DataSource
- dataSource :: DataSource
- dsStatus :: Lens' DataSource (Maybe EntityStatus)
- dsNumberOfFiles :: Lens' DataSource (Maybe Integer)
- dsLastUpdatedAt :: Lens' DataSource (Maybe UTCTime)
- dsCreatedAt :: Lens' DataSource (Maybe UTCTime)
- dsComputeTime :: Lens' DataSource (Maybe Integer)
- dsDataSourceId :: Lens' DataSource (Maybe Text)
- dsRDSMetadata :: Lens' DataSource (Maybe RDSMetadata)
- dsDataSizeInBytes :: Lens' DataSource (Maybe Integer)
- dsStartedAt :: Lens' DataSource (Maybe UTCTime)
- dsFinishedAt :: Lens' DataSource (Maybe UTCTime)
- dsCreatedByIAMUser :: Lens' DataSource (Maybe Text)
- dsName :: Lens' DataSource (Maybe Text)
- dsDataLocationS3 :: Lens' DataSource (Maybe Text)
- dsComputeStatistics :: Lens' DataSource (Maybe Bool)
- dsMessage :: Lens' DataSource (Maybe Text)
- dsRedshiftMetadata :: Lens' DataSource (Maybe RedshiftMetadata)
- dsDataRearrangement :: Lens' DataSource (Maybe Text)
- dsRoleARN :: Lens' DataSource (Maybe Text)
- data Evaluation
- evaluation :: Evaluation
- eStatus :: Lens' Evaluation (Maybe EntityStatus)
- ePerformanceMetrics :: Lens' Evaluation (Maybe PerformanceMetrics)
- eLastUpdatedAt :: Lens' Evaluation (Maybe UTCTime)
- eCreatedAt :: Lens' Evaluation (Maybe UTCTime)
- eComputeTime :: Lens' Evaluation (Maybe Integer)
- eInputDataLocationS3 :: Lens' Evaluation (Maybe Text)
- eMLModelId :: Lens' Evaluation (Maybe Text)
- eStartedAt :: Lens' Evaluation (Maybe UTCTime)
- eFinishedAt :: Lens' Evaluation (Maybe UTCTime)
- eCreatedByIAMUser :: Lens' Evaluation (Maybe Text)
- eName :: Lens' Evaluation (Maybe Text)
- eEvaluationId :: Lens' Evaluation (Maybe Text)
- eMessage :: Lens' Evaluation (Maybe Text)
- eEvaluationDataSourceId :: Lens' Evaluation (Maybe Text)
- data MLModel
- mLModel :: MLModel
- mlmStatus :: Lens' MLModel (Maybe EntityStatus)
- mlmLastUpdatedAt :: Lens' MLModel (Maybe UTCTime)
- mlmTrainingParameters :: Lens' MLModel (HashMap Text Text)
- mlmScoreThresholdLastUpdatedAt :: Lens' MLModel (Maybe UTCTime)
- mlmCreatedAt :: Lens' MLModel (Maybe UTCTime)
- mlmComputeTime :: Lens' MLModel (Maybe Integer)
- mlmInputDataLocationS3 :: Lens' MLModel (Maybe Text)
- mlmMLModelId :: Lens' MLModel (Maybe Text)
- mlmSizeInBytes :: Lens' MLModel (Maybe Integer)
- mlmStartedAt :: Lens' MLModel (Maybe UTCTime)
- mlmScoreThreshold :: Lens' MLModel (Maybe Double)
- mlmFinishedAt :: Lens' MLModel (Maybe UTCTime)
- mlmAlgorithm :: Lens' MLModel (Maybe Algorithm)
- mlmCreatedByIAMUser :: Lens' MLModel (Maybe Text)
- mlmName :: Lens' MLModel (Maybe Text)
- mlmEndpointInfo :: Lens' MLModel (Maybe RealtimeEndpointInfo)
- mlmTrainingDataSourceId :: Lens' MLModel (Maybe Text)
- mlmMessage :: Lens' MLModel (Maybe Text)
- mlmMLModelType :: Lens' MLModel (Maybe MLModelType)
- data PerformanceMetrics
- performanceMetrics :: PerformanceMetrics
- pmProperties :: Lens' PerformanceMetrics (HashMap Text Text)
- data Prediction
- prediction :: Prediction
- pPredictedValue :: Lens' Prediction (Maybe Double)
- pPredictedLabel :: Lens' Prediction (Maybe Text)
- pPredictedScores :: Lens' Prediction (HashMap Text Double)
- pDetails :: Lens' Prediction (HashMap DetailsAttributes Text)
- data RDSDataSpec
- rdsDataSpec :: RDSDatabase -> Text -> RDSDatabaseCredentials -> Text -> Text -> Text -> Text -> RDSDataSpec
- rdsdsDataSchemaURI :: Lens' RDSDataSpec (Maybe Text)
- rdsdsDataSchema :: Lens' RDSDataSpec (Maybe Text)
- rdsdsDataRearrangement :: Lens' RDSDataSpec (Maybe Text)
- rdsdsDatabaseInformation :: Lens' RDSDataSpec RDSDatabase
- rdsdsSelectSqlQuery :: Lens' RDSDataSpec Text
- rdsdsDatabaseCredentials :: Lens' RDSDataSpec RDSDatabaseCredentials
- rdsdsS3StagingLocation :: Lens' RDSDataSpec Text
- rdsdsResourceRole :: Lens' RDSDataSpec Text
- rdsdsServiceRole :: Lens' RDSDataSpec Text
- rdsdsSubnetId :: Lens' RDSDataSpec Text
- rdsdsSecurityGroupIds :: Lens' RDSDataSpec [Text]
- data RDSDatabase
- rdsDatabase :: Text -> Text -> RDSDatabase
- rdsdInstanceIdentifier :: Lens' RDSDatabase Text
- rdsdDatabaseName :: Lens' RDSDatabase Text
- data RDSDatabaseCredentials
- rdsDatabaseCredentials :: Text -> Text -> RDSDatabaseCredentials
- rdsdcUsername :: Lens' RDSDatabaseCredentials Text
- rdsdcPassword :: Lens' RDSDatabaseCredentials Text
- data RDSMetadata
- rdsMetadata :: RDSMetadata
- rmSelectSqlQuery :: Lens' RDSMetadata (Maybe Text)
- rmDataPipelineId :: Lens' RDSMetadata (Maybe Text)
- rmDatabase :: Lens' RDSMetadata (Maybe RDSDatabase)
- rmDatabaseUserName :: Lens' RDSMetadata (Maybe Text)
- rmResourceRole :: Lens' RDSMetadata (Maybe Text)
- rmServiceRole :: Lens' RDSMetadata (Maybe Text)
- data RealtimeEndpointInfo
- realtimeEndpointInfo :: RealtimeEndpointInfo
- reiCreatedAt :: Lens' RealtimeEndpointInfo (Maybe UTCTime)
- reiEndpointURL :: Lens' RealtimeEndpointInfo (Maybe Text)
- reiEndpointStatus :: Lens' RealtimeEndpointInfo (Maybe RealtimeEndpointStatus)
- reiPeakRequestsPerSecond :: Lens' RealtimeEndpointInfo (Maybe Int)
- data RedshiftDataSpec
- redshiftDataSpec :: RedshiftDatabase -> Text -> RedshiftDatabaseCredentials -> Text -> RedshiftDataSpec
- rDataSchemaURI :: Lens' RedshiftDataSpec (Maybe Text)
- rDataSchema :: Lens' RedshiftDataSpec (Maybe Text)
- rDataRearrangement :: Lens' RedshiftDataSpec (Maybe Text)
- rDatabaseInformation :: Lens' RedshiftDataSpec RedshiftDatabase
- rSelectSqlQuery :: Lens' RedshiftDataSpec Text
- rDatabaseCredentials :: Lens' RedshiftDataSpec RedshiftDatabaseCredentials
- rS3StagingLocation :: Lens' RedshiftDataSpec Text
- data RedshiftDatabase
- redshiftDatabase :: Text -> Text -> RedshiftDatabase
- rdDatabaseName :: Lens' RedshiftDatabase Text
- rdClusterIdentifier :: Lens' RedshiftDatabase Text
- data RedshiftDatabaseCredentials
- redshiftDatabaseCredentials :: Text -> Text -> RedshiftDatabaseCredentials
- rdcUsername :: Lens' RedshiftDatabaseCredentials Text
- rdcPassword :: Lens' RedshiftDatabaseCredentials Text
- data RedshiftMetadata
- redshiftMetadata :: RedshiftMetadata
- redSelectSqlQuery :: Lens' RedshiftMetadata (Maybe Text)
- redRedshiftDatabase :: Lens' RedshiftMetadata (Maybe RedshiftDatabase)
- redDatabaseUserName :: Lens' RedshiftMetadata (Maybe Text)
- data S3DataSpec
- s3DataSpec :: Text -> S3DataSpec
- sdsDataSchema :: Lens' S3DataSpec (Maybe Text)
- sdsDataSchemaLocationS3 :: Lens' S3DataSpec (Maybe Text)
- sdsDataRearrangement :: Lens' S3DataSpec (Maybe Text)
- sdsDataLocationS3 :: Lens' S3DataSpec Text
- data Tag
- tag :: Tag
- tagValue :: Lens' Tag (Maybe Text)
- tagKey :: Lens' Tag (Maybe Text)
Service Configuration
machineLearning :: Service Source #
API version 2014-12-12 of the Amazon Machine Learning SDK configuration.
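The configuration above is picked up automatically by every operation in this package, so a plain Env is all that is needed to start sending requests. A minimal runner sketch (the Ireland region and Discover credential lookup are example choices, not requirements):

```haskell
import Control.Monad.Trans.Resource (runResourceT)
import Network.AWS
import Network.AWS.MachineLearning ()

-- Run a block of Amazon ML actions. The machineLearning Service value is the
-- default configuration baked into each generated operation, so nothing has
-- to be passed explicitly here.
runML :: AWS a -> IO a
runML action = do
  env <- newEnv Discover                      -- look up credentials in the usual places
  runResourceT . runAWS env $ within Ireland action
```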
Errors
Error matchers are designed for use with the functions provided by
Control.Exception.Lens.
This allows catching (and rethrowing) service-specific errors returned
by MachineLearning.
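For example, a GetMLModel call can treat a missing model as an ordinary result instead of an exception by wrapping the send in trying from Control.Exception.Lens. A hedged sketch (the model ID is a placeholder, and getMLModel is assumed to take the model ID as its only required argument):

```haskell
{-# LANGUAGE OverloadedStrings #-}

import Control.Exception.Lens        (trying)
import Control.Monad.Trans.Resource  (runResourceT)
import Network.AWS
import Network.AWS.MachineLearning

-- Fetch an MLModel, turning ResourceNotFoundException into a Left value
-- instead of a thrown exception.
getModelSafely :: IO ()
getModelSafely = do
  env <- newEnv Discover
  res <- runResourceT . runAWS env $
           trying _ResourceNotFoundException $
             send (getMLModel "ml-example-id")   -- placeholder model ID
  case res of
    Left err -> putStrLn ("model not found: " ++ show err)
    Right rs -> print rs
```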
InvalidTagException
_InvalidTagException :: AsError a => Getting (First ServiceError) a ServiceError Source #
Prism for InvalidTagException errors.
InternalServerException
_InternalServerException :: AsError a => Getting (First ServiceError) a ServiceError Source #
An error on the server occurred when trying to process a request.
InvalidInputException
_InvalidInputException :: AsError a => Getting (First ServiceError) a ServiceError Source #
An error on the client occurred. Typically, the cause is an invalid input value.
IdempotentParameterMismatchException
_IdempotentParameterMismatchException :: AsError a => Getting (First ServiceError) a ServiceError Source #
A second request to use or change an object was not allowed. This can result from retrying a request using a parameter that was not present in the original request.
TagLimitExceededException
_TagLimitExceededException :: AsError a => Getting (First ServiceError) a ServiceError Source #
Prism for TagLimitExceededException errors.
PredictorNotMountedException
_PredictorNotMountedException :: AsError a => Getting (First ServiceError) a ServiceError Source #
The exception is thrown when a predict request is made to an unmounted MLModel .
ResourceNotFoundException
_ResourceNotFoundException :: AsError a => Getting (First ServiceError) a ServiceError Source #
A specified resource cannot be located.
LimitExceededException
_LimitExceededException :: AsError a => Getting (First ServiceError) a ServiceError Source #
The subscriber exceeded the maximum number of operations. This exception can occur when listing objects such as DataSource .
Waiters
Waiters poll by repeatedly sending a request until some remote success condition
configured by the Wait specification is fulfilled. The Wait specification
determines how many attempts should be made, in addition to delay and retry strategies.
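A short sketch of driving one of the waiters below; it assumes describeDataSources needs no mandatory arguments and simply reports the final Accept value:

```haskell
import Control.Monad.Trans.Resource (runResourceT)
import Network.AWS
import Network.AWS.MachineLearning

-- Block until the dataSourceAvailable waiter succeeds, fails, or exhausts
-- its configured attempts, then print the outcome.
waitForDataSources :: IO ()
waitForDataSources = do
  env <- newEnv Discover
  r   <- runResourceT . runAWS env $
           await dataSourceAvailable describeDataSources
  print r   -- AcceptSuccess, AcceptFailure, or AcceptRetry
```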
MLModelAvailable
mLModelAvailable :: Wait DescribeMLModels Source #
Polls DescribeMLModels every 30 seconds until a successful state is reached. An error is returned after 60 failed checks.
BatchPredictionAvailable
batchPredictionAvailable :: Wait DescribeBatchPredictions Source #
Polls DescribeBatchPredictions every 30 seconds until a successful state is reached. An error is returned after 60 failed checks.
DataSourceAvailable
dataSourceAvailable :: Wait DescribeDataSources Source #
Polls DescribeDataSources every 30 seconds until a successful state is reached. An error is returned after 60 failed checks.
EvaluationAvailable
evaluationAvailable :: Wait DescribeEvaluations Source #
Polls DescribeEvaluations every 30 seconds until a successful state is reached. An error is returned after 60 failed checks.
Operations
Some AWS operations return results that are incomplete and require subsequent
requests in order to obtain the entire result set. The process of sending
subsequent requests to continue where a previous request left off is called
pagination. For example, the ListObjects operation of Amazon S3 returns up to
1000 objects at a time, and you must send subsequent requests with the
appropriate Marker in order to retrieve the next page of results.
Operations that have an AWSPager instance can transparently perform subsequent
requests, correctly setting Markers and other request facets to iterate through
the entire result set of a truncated API operation. Operations which support
this have an additional note in the documentation.
Many operations have the ability to filter results on the server side. See the individual operation parameters for details.
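For example, DescribeMLModels carries an AWSPager instance, so its pages can be streamed with paginate and a conduit sink. A sketch (runConduit and .| are assumed to be available from your conduit version; older releases spell the same pipeline with $$):

```haskell
import Control.Monad.IO.Class       (liftIO)
import Control.Monad.Trans.Resource (runResourceT)
import Data.Conduit                 (runConduit, (.|))
import qualified Data.Conduit.List as CL
import Network.AWS
import Network.AWS.MachineLearning

-- Stream every page of DescribeMLModels; paginate follows the pagination
-- token between requests, so each downstream element is one full response.
printAllModelPages :: IO ()
printAllModelPages = do
  env <- newEnv Discover
  runResourceT . runAWS env $
    runConduit $ paginate describeMLModels .| CL.mapM_ (liftIO . print)
```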
UpdateDataSource
DeleteDataSource
DescribeTags
CreateDataSourceFromRedshift
CreateDataSourceFromS3
CreateMLModel
DeleteTags
DeleteBatchPrediction
UpdateBatchPrediction
GetMLModel
GetDataSource
UpdateEvaluation
DeleteEvaluation
DeleteMLModel
UpdateMLModel
GetBatchPrediction
DescribeBatchPredictions (Paginated)
CreateDataSourceFromRDS
CreateEvaluation
Predict
DeleteRealtimeEndpoint
CreateBatchPrediction
GetEvaluation
DescribeEvaluations (Paginated)
CreateRealtimeEndpoint
AddTags
DescribeMLModels (Paginated)
DescribeDataSources (Paginated)
Types
Algorithm
data Algorithm Source #
The function used to train an MLModel . Training choices supported by Amazon ML include the following:
- SGD - Stochastic Gradient Descent.
- RandomForest - Random forest of decision trees.
Constructors
| SGD |
Instances
BatchPredictionFilterVariable
data BatchPredictionFilterVariable Source #
A list of the variables to use in searching or filtering BatchPrediction .
- CreatedAt - Sets the search criteria to the BatchPrediction creation date.
- Status - Sets the search criteria to the BatchPrediction status.
- Name - Sets the search criteria to the contents of BatchPrediction Name.
- IAMUser - Sets the search criteria to the user account that invoked the BatchPrediction creation.
- MLModelId - Sets the search criteria to the MLModel used in the BatchPrediction.
- DataSourceId - Sets the search criteria to the DataSource used in the BatchPrediction.
- DataURI - Sets the search criteria to the data file(s) used in the BatchPrediction. The URL can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
Constructors
| BatchCreatedAt | |
| BatchDataSourceId | |
| BatchDataURI | |
| BatchIAMUser | |
| BatchLastUpdatedAt | |
| BatchMLModelId | |
| BatchName | |
| BatchStatus |
Instances
DataSourceFilterVariable
data DataSourceFilterVariable Source #
A list of the variables to use in searching or filtering DataSource .
- CreatedAt - Sets the search criteria to the DataSource creation date.
- Status - Sets the search criteria to the DataSource status.
- Name - Sets the search criteria to the contents of DataSource Name.
- DataUri - Sets the search criteria to the URI of data files used to create the DataSource. The URI can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
- IAMUser - Sets the search criteria to the user account that invoked the DataSource creation.
Instances
DetailsAttributes
data DetailsAttributes Source #
Contains the key values of DetailsMap :
- PredictiveModelType - Indicates the type of the MLModel.
- Algorithm - Indicates the algorithm that was used for the MLModel.
Constructors
| Algorithm | |
| PredictiveModelType |
Instances
EntityStatus
data EntityStatus Source #
Object status with the following possible values:
- PENDING
- INPROGRESS
- FAILED
- COMPLETED
- DELETED
Constructors
| ESCompleted | |
| ESDeleted | |
| ESFailed | |
| ESInprogress | |
| ESPending |
Instances
EvaluationFilterVariable
data EvaluationFilterVariable Source #
A list of the variables to use in searching or filtering Evaluation .
- CreatedAt - Sets the search criteria to the Evaluation creation date.
- Status - Sets the search criteria to the Evaluation status.
- Name - Sets the search criteria to the contents of Evaluation Name.
- IAMUser - Sets the search criteria to the user account that invoked an evaluation.
- MLModelId - Sets the search criteria to the Predictor that was evaluated.
- DataSourceId - Sets the search criteria to the DataSource used in evaluation.
- DataUri - Sets the search criteria to the data file(s) used in evaluation. The URL can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
Constructors
| EvalCreatedAt | |
| EvalDataSourceId | |
| EvalDataURI | |
| EvalIAMUser | |
| EvalLastUpdatedAt | |
| EvalMLModelId | |
| EvalName | |
| EvalStatus |
Instances
MLModelFilterVariable
data MLModelFilterVariable Source #
Constructors
| MLMFVAlgorithm | |
| MLMFVCreatedAt | |
| MLMFVIAMUser | |
| MLMFVLastUpdatedAt | |
| MLMFVMLModelType | |
| MLMFVName | |
| MLMFVRealtimeEndpointStatus | |
| MLMFVStatus | |
| MLMFVTrainingDataSourceId | |
| MLMFVTrainingDataURI |
Instances
MLModelType
data MLModelType Source #
Constructors
| Binary | |
| Multiclass | |
| Regression |
Instances
RealtimeEndpointStatus
data RealtimeEndpointStatus Source #
Instances
SortOrder
data SortOrder Source #
The sort order specified in a listing condition. Possible values include the following:
- asc - Present the information in ascending order (from A-Z).
- dsc - Present the information in descending order (from Z-A).
Instances
TaggableResourceType
data TaggableResourceType Source #
Constructors
| BatchPrediction | |
| DataSource | |
| Evaluation | |
| MLModel |
Instances
BatchPrediction
data BatchPrediction Source #
Represents the output of a GetBatchPrediction operation.
The content consists of the detailed metadata, the status, and the data file information of a Batch Prediction .
See: batchPrediction smart constructor.
batchPrediction :: BatchPrediction Source #
Creates a value of BatchPrediction with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
- bpStatus - The status of the BatchPrediction. This element can have one of the following values: * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to generate predictions for a batch of observations. * INPROGRESS - The process is underway. * FAILED - The request to perform a batch prediction did not run to completion. It is not usable. * COMPLETED - The batch prediction process completed successfully. * DELETED - The BatchPrediction is marked as deleted. It is not usable.
- bpLastUpdatedAt - The time of the most recent edit to the BatchPrediction. The time is expressed in epoch time.
- bpCreatedAt - The time that the BatchPrediction was created. The time is expressed in epoch time.
- bpComputeTime - Undocumented member.
- bpInputDataLocationS3 - The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).
- bpMLModelId - The ID of the MLModel that generated predictions for the BatchPrediction request.
- bpBatchPredictionDataSourceId - The ID of the DataSource that points to the group of observations to predict.
- bpTotalRecordCount - Undocumented member.
- bpStartedAt - Undocumented member.
- bpBatchPredictionId - The ID assigned to the BatchPrediction at creation. This value should be identical to the value of the BatchPredictionID in the request.
- bpFinishedAt - Undocumented member.
- bpInvalidRecordCount - Undocumented member.
- bpCreatedByIAMUser - The AWS user account that invoked the BatchPrediction. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.
- bpName - A user-supplied name or description of the BatchPrediction.
- bpMessage - A description of the most recent details about processing the batch prediction request.
- bpOutputURI - The location of an Amazon S3 bucket or directory to receive the operation results. The following substrings are not allowed in the s3 key portion of the outputURI field: :, //, /./, /../.
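The lenses above can also be used to build or adjust a BatchPrediction value locally, for example in test fixtures. A small sketch using placeholder IDs:

```haskell
{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (?~), (^.))
import Data.Text    (Text)
import Network.AWS.MachineLearning

-- Build a BatchPrediction value and read a field back. Every field is
-- Maybe-typed, so the setters use (?~) to wrap values in Just.
exampleBatchPrediction :: BatchPrediction
exampleBatchPrediction =
  batchPrediction
    & bpBatchPredictionId ?~ "bp-example"          -- placeholder ID
    & bpMLModelId         ?~ "ml-example"          -- placeholder ID
    & bpName              ?~ "nightly scoring run"
    & bpStatus            ?~ ESPending

exampleName :: Maybe Text
exampleName = exampleBatchPrediction ^. bpName     -- Just "nightly scoring run"
```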
bpStatus :: Lens' BatchPrediction (Maybe EntityStatus) Source #
The status of the BatchPrediction . This element can have one of the following values: * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to generate predictions for a batch of observations. * INPROGRESS - The process is underway. * FAILED - The request to perform a batch prediction did not run to completion. It is not usable. * COMPLETED - The batch prediction process completed successfully. * DELETED - The BatchPrediction is marked as deleted. It is not usable.
bpLastUpdatedAt :: Lens' BatchPrediction (Maybe UTCTime) Source #
The time of the most recent edit to the BatchPrediction . The time is expressed in epoch time.
bpCreatedAt :: Lens' BatchPrediction (Maybe UTCTime) Source #
The time that the BatchPrediction was created. The time is expressed in epoch time.
bpComputeTime :: Lens' BatchPrediction (Maybe Integer) Source #
Undocumented member.
bpInputDataLocationS3 :: Lens' BatchPrediction (Maybe Text) Source #
The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).
bpMLModelId :: Lens' BatchPrediction (Maybe Text) Source #
The ID of the MLModel that generated predictions for the BatchPrediction request.
bpBatchPredictionDataSourceId :: Lens' BatchPrediction (Maybe Text) Source #
The ID of the DataSource that points to the group of observations to predict.
bpTotalRecordCount :: Lens' BatchPrediction (Maybe Integer) Source #
Undocumented member.
bpStartedAt :: Lens' BatchPrediction (Maybe UTCTime) Source #
Undocumented member.
bpBatchPredictionId :: Lens' BatchPrediction (Maybe Text) Source #
The ID assigned to the BatchPrediction at creation. This value should be identical to the value of the BatchPredictionID in the request.
bpFinishedAt :: Lens' BatchPrediction (Maybe UTCTime) Source #
Undocumented member.
bpInvalidRecordCount :: Lens' BatchPrediction (Maybe Integer) Source #
Undocumented member.
bpCreatedByIAMUser :: Lens' BatchPrediction (Maybe Text) Source #
The AWS user account that invoked the BatchPrediction . The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.
bpName :: Lens' BatchPrediction (Maybe Text) Source #
A user-supplied name or description of the BatchPrediction .
bpMessage :: Lens' BatchPrediction (Maybe Text) Source #
A description of the most recent details about processing the batch prediction request.
bpOutputURI :: Lens' BatchPrediction (Maybe Text) Source #
The location of an Amazon S3 bucket or directory to receive the operation results. The following substrings are not allowed in the s3 key portion of the outputURI field: :, //, /./, /../.
DataSource
data DataSource Source #
Represents the output of the GetDataSource operation.
The content consists of the detailed metadata and data file information and the current status of the DataSource .
See: dataSource smart constructor.
Instances
dataSource :: DataSource Source #
Creates a value of DataSource with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
- dsStatus - The current status of the DataSource. This element can have one of the following values: * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to create a DataSource. * INPROGRESS - The creation process is underway. * FAILED - The request to create a DataSource did not run to completion. It is not usable. * COMPLETED - The creation process completed successfully. * DELETED - The DataSource is marked as deleted. It is not usable.
- dsNumberOfFiles - The number of data files referenced by the DataSource.
- dsLastUpdatedAt - The time of the most recent edit to the BatchPrediction. The time is expressed in epoch time.
- dsCreatedAt - The time that the DataSource was created. The time is expressed in epoch time.
- dsComputeTime - Undocumented member.
- dsDataSourceId - The ID that is assigned to the DataSource during creation.
- dsRDSMetadata - Undocumented member.
- dsDataSizeInBytes - The total number of observations contained in the data files that the DataSource references.
- dsStartedAt - Undocumented member.
- dsFinishedAt - Undocumented member.
- dsCreatedByIAMUser - The AWS user account from which the DataSource was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.
- dsName - A user-supplied name or description of the DataSource.
- dsDataLocationS3 - The location and name of the data in Amazon Simple Storage Service (Amazon S3) that is used by a DataSource.
- dsComputeStatistics - The parameter is true if statistics need to be generated from the observation data.
- dsMessage - A description of the most recent details about creating the DataSource.
- dsRedshiftMetadata - Undocumented member.
- dsDataRearrangement - A JSON string that represents the splitting and rearrangement requirement used when this DataSource was created.
- dsRoleARN - Undocumented member.
dsStatus :: Lens' DataSource (Maybe EntityStatus) Source #
The current status of the DataSource . This element can have one of the following values: * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to create a DataSource . * INPROGRESS - The creation process is underway. * FAILED - The request to create a DataSource did not run to completion. It is not usable. * COMPLETED - The creation process completed successfully. * DELETED - The DataSource is marked as deleted. It is not usable.
dsNumberOfFiles :: Lens' DataSource (Maybe Integer) Source #
The number of data files referenced by the DataSource .
dsLastUpdatedAt :: Lens' DataSource (Maybe UTCTime) Source #
The time of the most recent edit to the BatchPrediction . The time is expressed in epoch time.
dsCreatedAt :: Lens' DataSource (Maybe UTCTime) Source #
The time that the DataSource was created. The time is expressed in epoch time.
dsComputeTime :: Lens' DataSource (Maybe Integer) Source #
Undocumented member.
dsDataSourceId :: Lens' DataSource (Maybe Text) Source #
The ID that is assigned to the DataSource during creation.
dsRDSMetadata :: Lens' DataSource (Maybe RDSMetadata) Source #
Undocumented member.
dsDataSizeInBytes :: Lens' DataSource (Maybe Integer) Source #
The total number of observations contained in the data files that the DataSource references.
dsStartedAt :: Lens' DataSource (Maybe UTCTime) Source #
Undocumented member.
dsFinishedAt :: Lens' DataSource (Maybe UTCTime) Source #
Undocumented member.
dsCreatedByIAMUser :: Lens' DataSource (Maybe Text) Source #
The AWS user account from which the DataSource was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.
dsName :: Lens' DataSource (Maybe Text) Source #
A user-supplied name or description of the DataSource .
dsDataLocationS3 :: Lens' DataSource (Maybe Text) Source #
The location and name of the data in Amazon Simple Storage Service (Amazon S3) that is used by a DataSource .
dsComputeStatistics :: Lens' DataSource (Maybe Bool) Source #
The parameter is true if statistics need to be generated from the observation data.
dsMessage :: Lens' DataSource (Maybe Text) Source #
A description of the most recent details about creating the DataSource .
dsRedshiftMetadata :: Lens' DataSource (Maybe RedshiftMetadata) Source #
Undocumented member.
dsDataRearrangement :: Lens' DataSource (Maybe Text) Source #
A JSON string that represents the splitting and rearrangement requirement used when this DataSource was created.
Evaluation
data Evaluation Source #
Represents the output of a GetEvaluation operation.
The content consists of the detailed metadata and data file information and the current status of the Evaluation .
See: evaluation smart constructor.
Instances
evaluation :: Evaluation Source #
Creates a value of Evaluation with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
- eStatus - The status of the evaluation. This element can have one of the following values: * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to evaluate an MLModel. * INPROGRESS - The evaluation is underway. * FAILED - The request to evaluate an MLModel did not run to completion. It is not usable. * COMPLETED - The evaluation process completed successfully. * DELETED - The Evaluation is marked as deleted. It is not usable.
- ePerformanceMetrics - Measurements of how well the MLModel performed, using observations referenced by the DataSource. One of the following metrics is returned, based on the type of the MLModel: * BinaryAUC: A binary MLModel uses the Area Under the Curve (AUC) technique to measure performance. * RegressionRMSE: A regression MLModel uses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable. * MulticlassAvgFScore: A multiclass MLModel uses the F1 score technique to measure performance. For more information about performance metrics, please see the Amazon Machine Learning Developer Guide.
- eLastUpdatedAt - The time of the most recent edit to the Evaluation. The time is expressed in epoch time.
- eCreatedAt - The time that the Evaluation was created. The time is expressed in epoch time.
- eComputeTime - Undocumented member.
- eInputDataLocationS3 - The location and name of the data in Amazon Simple Storage Server (Amazon S3) that is used in the evaluation.
- eMLModelId - The ID of the MLModel that is the focus of the evaluation.
- eStartedAt - Undocumented member.
- eFinishedAt - Undocumented member.
- eCreatedByIAMUser - The AWS user account that invoked the evaluation. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.
- eName - A user-supplied name or description of the Evaluation.
- eEvaluationId - The ID that is assigned to the Evaluation at creation.
- eMessage - A description of the most recent details about evaluating the MLModel.
- eEvaluationDataSourceId - The ID of the DataSource that is used to evaluate the MLModel.
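As a sketch of reading the nested metrics, the following pulls one property out of ePerformanceMetrics; the exact property key ("RegressionRMSE") follows the wording above and should be treated as an assumption:

```haskell
{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((^?), _Just)
import qualified Data.HashMap.Strict as HM
import Data.Text    (Text)
import Network.AWS.MachineLearning

-- Look up the RMSE metric on an Evaluation, if the service reported one.
-- The value comes back as Text because pmProperties is a Text-to-Text map.
regressionRMSE :: Evaluation -> Maybe Text
regressionRMSE ev =
  ev ^? ePerformanceMetrics . _Just . pmProperties
    >>= HM.lookup "RegressionRMSE"
```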
eStatus :: Lens' Evaluation (Maybe EntityStatus) Source #
The status of the evaluation. This element can have one of the following values: * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to evaluate an MLModel . * INPROGRESS - The evaluation is underway. * FAILED - The request to evaluate an MLModel did not run to completion. It is not usable. * COMPLETED - The evaluation process completed successfully. * DELETED - The Evaluation is marked as deleted. It is not usable.
ePerformanceMetrics :: Lens' Evaluation (Maybe PerformanceMetrics) Source #
Measurements of how well the MLModel performed, using observations referenced by the DataSource . One of the following metrics is returned, based on the type of the MLModel : * BinaryAUC: A binary MLModel uses the Area Under the Curve (AUC) technique to measure performance. * RegressionRMSE: A regression MLModel uses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable. * MulticlassAvgFScore: A multiclass MLModel uses the F1 score technique to measure performance. For more information about performance metrics, please see the Amazon Machine Learning Developer Guide .
eLastUpdatedAt :: Lens' Evaluation (Maybe UTCTime) Source #
The time of the most recent edit to the Evaluation . The time is expressed in epoch time.
eCreatedAt :: Lens' Evaluation (Maybe UTCTime) Source #
The time that the Evaluation was created. The time is expressed in epoch time.
eComputeTime :: Lens' Evaluation (Maybe Integer) Source #
Undocumented member.
eInputDataLocationS3 :: Lens' Evaluation (Maybe Text) Source #
The location and name of the data in Amazon Simple Storage Server (Amazon S3) that is used in the evaluation.
eMLModelId :: Lens' Evaluation (Maybe Text) Source #
The ID of the MLModel that is the focus of the evaluation.
eStartedAt :: Lens' Evaluation (Maybe UTCTime) Source #
Undocumented member.
eFinishedAt :: Lens' Evaluation (Maybe UTCTime) Source #
Undocumented member.
eCreatedByIAMUser :: Lens' Evaluation (Maybe Text) Source #
The AWS user account that invoked the evaluation. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.
eName :: Lens' Evaluation (Maybe Text) Source #
A user-supplied name or description of the Evaluation .
eEvaluationId :: Lens' Evaluation (Maybe Text) Source #
The ID that is assigned to the Evaluation at creation.
eMessage :: Lens' Evaluation (Maybe Text) Source #
A description of the most recent details about evaluating the MLModel .
eEvaluationDataSourceId :: Lens' Evaluation (Maybe Text) Source #
The ID of the DataSource that is used to evaluate the MLModel .
MLModel
data MLModel Source #
Represents the output of a GetMLModel operation.
The content consists of the detailed metadata and the current status of the MLModel .
See: mLModel smart constructor.
mLModel :: MLModel Source #
Creates a value of MLModel with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
- mlmStatus - The current status of an MLModel. This element can have one of the following values: * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to create an MLModel. * INPROGRESS - The creation process is underway. * FAILED - The request to create an MLModel didn't run to completion. The model isn't usable. * COMPLETED - The creation process completed successfully. * DELETED - The MLModel is marked as deleted. It isn't usable.
- mlmLastUpdatedAt - The time of the most recent edit to the MLModel. The time is expressed in epoch time.
- mlmTrainingParameters - A list of the training parameters in the MLModel. The list is implemented as a map of key-value pairs. The following is the current set of training parameters: * sgd.maxMLModelSizeInBytes - The maximum allowed size of the model. Depending on the input data, the size of the model might affect its performance. The value is an integer that ranges from 100000 to 2147483648. The default value is 33554432. * sgd.maxPasses - The number of times that the training process traverses the observations to build the MLModel. The value is an integer that ranges from 1 to 10000. The default value is 10. * sgd.shuffleType - Whether Amazon ML shuffles the training data. Shuffling the data improves a model's ability to find the optimal solution for a variety of data types. The valid values are auto and none. The default value is none. * sgd.l1RegularizationAmount - The coefficient regularization L1 norm, which controls overfitting the data by penalizing large coefficients. This parameter tends to drive coefficients to zero, resulting in sparse feature set. If you use this parameter, start by specifying a small value, such as 1.0E-08. The value is a double that ranges from 0 to MAX_DOUBLE. The default is to not use L1 normalization. This parameter can't be used when L2 is specified. Use this parameter sparingly. * sgd.l2RegularizationAmount - The coefficient regularization L2 norm, which controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to small, nonzero values. If you use this parameter, start by specifying a small value, such as 1.0E-08. The value is a double that ranges from 0 to MAX_DOUBLE. The default is to not use L2 normalization. This parameter can't be used when L1 is specified. Use this parameter sparingly.
- mlmScoreThresholdLastUpdatedAt - The time of the most recent edit to the ScoreThreshold. The time is expressed in epoch time.
- mlmCreatedAt - The time that the MLModel was created. The time is expressed in epoch time.
- mlmComputeTime - Undocumented member.
- mlmInputDataLocationS3 - The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).
- mlmMLModelId - The ID assigned to the MLModel at creation.
- mlmSizeInBytes - Undocumented member.
- mlmStartedAt - Undocumented member.
- mlmScoreThreshold - Undocumented member.
- mlmFinishedAt - Undocumented member.
- mlmAlgorithm - The algorithm used to train the MLModel. The following algorithm is supported: * SGD -- Stochastic gradient descent. The goal of SGD is to minimize the gradient of the loss function.
- mlmCreatedByIAMUser - The AWS user account from which the MLModel was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.
- mlmName - A user-supplied name or description of the MLModel.
- mlmEndpointInfo - The current endpoint of the MLModel.
- mlmTrainingDataSourceId - The ID of the training DataSource. The CreateMLModel operation uses the TrainingDataSourceId.
- mlmMessage - A description of the most recent details about accessing the MLModel.
- mlmMLModelType - Identifies the MLModel category. The following are the available types: * REGRESSION - Produces a numeric result. For example, "What price should a house be listed at?" * BINARY - Produces one of two possible results. For example, "Is this a child-friendly web site?". * MULTICLASS - Produces one of several possible results. For example, "Is this a HIGH-, LOW-, or MEDIUM-risk trade?".
mlmStatus :: Lens' MLModel (Maybe EntityStatus) Source #
The current status of an MLModel . This element can have one of the following values: * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to create an MLModel . * INPROGRESS - The creation process is underway. * FAILED - The request to create an MLModel didn't run to completion. The model isn't usable. * COMPLETED - The creation process completed successfully. * DELETED - The MLModel is marked as deleted. It isn't usable.
mlmLastUpdatedAt :: Lens' MLModel (Maybe UTCTime) Source #
The time of the most recent edit to the MLModel . The time is expressed in epoch time.
mlmTrainingParameters :: Lens' MLModel (HashMap Text Text) Source #
A list of the training parameters in the MLModel . The list is implemented as a map of key-value pairs. The following is the current set of training parameters: * sgd.maxMLModelSizeInBytes - The maximum allowed size of the model. Depending on the input data, the size of the model might affect its performance. The value is an integer that ranges from 100000 to 2147483648 . The default value is 33554432 . * sgd.maxPasses - The number of times that the training process traverses the observations to build the MLModel . The value is an integer that ranges from 1 to 10000 . The default value is 10 . * sgd.shuffleType - Whether Amazon ML shuffles the training data. Shuffling the data improves a model's ability to find the optimal solution for a variety of data types. The valid values are auto and none . The default value is none . * sgd.l1RegularizationAmount - The coefficient regularization L1 norm, which controls overfitting the data by penalizing large coefficients. This parameter tends to drive coefficients to zero, resulting in sparse feature set. If you use this parameter, start by specifying a small value, such as 1.0E-08 . The value is a double that ranges from 0 to MAX_DOUBLE . The default is to not use L1 normalization. This parameter can't be used when L2 is specified. Use this parameter sparingly. * sgd.l2RegularizationAmount - The coefficient regularization L2 norm, which controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to small, nonzero values. If you use this parameter, start by specifying a small value, such as 1.0E-08 . The value is a double that ranges from 0 to MAX_DOUBLE . The default is to not use L2 normalization. This parameter can't be used when L1 is specified. Use this parameter sparingly.
mlmScoreThresholdLastUpdatedAt :: Lens' MLModel (Maybe UTCTime) Source #
The time of the most recent edit to the ScoreThreshold . The time is expressed in epoch time.
mlmCreatedAt :: Lens' MLModel (Maybe UTCTime) Source #
The time that the MLModel was created. The time is expressed in epoch time.
mlmInputDataLocationS3 :: Lens' MLModel (Maybe Text) Source #
The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).
mlmAlgorithm :: Lens' MLModel (Maybe Algorithm) Source #
The algorithm used to train the MLModel . The following algorithm is supported: * SGD -- Stochastic gradient descent. The goal of SGD is to minimize the gradient of the loss function.
mlmCreatedByIAMUser :: Lens' MLModel (Maybe Text) Source #
The AWS user account from which the MLModel was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.
mlmEndpointInfo :: Lens' MLModel (Maybe RealtimeEndpointInfo) Source #
The current endpoint of the MLModel .
mlmTrainingDataSourceId :: Lens' MLModel (Maybe Text) Source #
The ID of the training DataSource . The CreateMLModel operation uses the TrainingDataSourceId .
mlmMessage :: Lens' MLModel (Maybe Text) Source #
A description of the most recent details about accessing the MLModel .
mlmMLModelType :: Lens' MLModel (Maybe MLModelType) Source #
Identifies the MLModel category. The following are the available types: * REGRESSION - Produces a numeric result. For example, "What price should a house be listed at?" * BINARY - Produces one of two possible results. For example, "Is this a child-friendly web site?". * MULTICLASS - Produces one of several possible results. For example, "Is this a HIGH-, LOW-, or MEDIUM-risk trade?".
PerformanceMetrics
data PerformanceMetrics Source #
Measurements of how well the MLModel performed on known observations. One of the following metrics is returned, based on the type of the MLModel :
- BinaryAUC: The binary MLModel uses the Area Under the Curve (AUC) technique to measure performance.
- RegressionRMSE: The regression MLModel uses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable.
- MulticlassAvgFScore: The multiclass MLModel uses the F1 score technique to measure performance.
For more information about performance metrics, please see the Amazon Machine Learning Developer Guide .
See: performanceMetrics smart constructor.
Instances
performanceMetrics :: PerformanceMetrics Source #
Creates a value of PerformanceMetrics with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
- pmProperties - Undocumented member.
pmProperties :: Lens' PerformanceMetrics (HashMap Text Text) Source #
Undocumented member.
Prediction
data Prediction Source #
The output from a Predict operation:
- Details - Contains the following attributes: DetailsAttributes.PREDICTIVE_MODEL_TYPE - REGRESSION | BINARY | MULTICLASS; DetailsAttributes.ALGORITHM - SGD
- PredictedLabel - Present for either a BINARY or MULTICLASS MLModel request.
- PredictedScores - Contains the raw classification score corresponding to each label.
- PredictedValue - Present for a REGRESSION MLModel request.
See: prediction smart constructor.
Instances
prediction :: Prediction Source #
Creates a value of Prediction with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
- pPredictedValue - The prediction value for REGRESSION MLModel.
- pPredictedLabel - The prediction label for either a BINARY or MULTICLASS MLModel.
- pPredictedScores - Undocumented member.
- pDetails - Undocumented member.
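A small sketch of interpreting a returned Prediction with these lenses, using the predicted label to index into the score map:

```haskell
import Control.Lens ((^.))
import qualified Data.HashMap.Strict as HM
import Network.AWS.MachineLearning

-- BINARY and MULTICLASS models populate pPredictedLabel (plus per-label
-- scores), while REGRESSION models populate pPredictedValue.
describePrediction :: Prediction -> String
describePrediction p =
  case (p ^. pPredictedLabel, p ^. pPredictedValue) of
    (Just label, _) ->
      "label " ++ show label
        ++ ", score " ++ show (HM.lookup label (p ^. pPredictedScores))
    (_, Just value) -> "numeric prediction " ++ show value
    _               -> "no prediction returned"
```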
pPredictedValue :: Lens' Prediction (Maybe Double) Source #
The prediction value for REGRESSION MLModel .
pPredictedLabel :: Lens' Prediction (Maybe Text) Source #
The prediction label for either a BINARY or MULTICLASS MLModel .
pPredictedScores :: Lens' Prediction (HashMap Text Double) Source #
Undocumented member.
pDetails :: Lens' Prediction (HashMap DetailsAttributes Text) Source #
Undocumented member.
RDSDataSpec
data RDSDataSpec Source #
The data specification of an Amazon Relational Database Service (Amazon RDS) DataSource .
See: rdsDataSpec smart constructor.
rdsDataSpec Source #
Arguments
| :: RDSDatabase | |
| -> Text | |
| -> RDSDatabaseCredentials | |
| -> Text | |
| -> Text | |
| -> Text | |
| -> Text | |
| -> RDSDataSpec |
Creates a value of RDSDataSpec with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
- rdsdsDataSchemaURI - The Amazon S3 location of the DataSchema.
- rdsdsDataSchema - A JSON string that represents the schema for an Amazon RDS DataSource. The DataSchema defines the structure of the observation data in the data file(s) referenced in the DataSource. A DataSchema is not required if you specify a DataSchemaUri. Define your DataSchema as a series of key-value pairs. attributes and excludedVariableNames have an array of key-value pairs for their value. Use the following format to define your DataSchema. { "version": "1.0", "recordAnnotationFieldName": F1, "recordWeightFieldName": F2, "targetFieldName": F3, "dataFormat": CSV, "dataFileContainsHeader": true, "attributes": [ { "fieldName": F1, "fieldType": TEXT }, { "fieldName": F2, "fieldType": NUMERIC }, { "fieldName": F3, "fieldType": CATEGORICAL }, { "fieldName": F4, "fieldType": NUMERIC }, { "fieldName": F5, "fieldType": CATEGORICAL }, { "fieldName": F6, "fieldType": TEXT }, { "fieldName": F7, "fieldType": WEIGHTED_INT_SEQUENCE }, { "fieldName": F8, "fieldType": WEIGHTED_STRING_SEQUENCE } ], "excludedVariableNames": [ F6 ] }
- rdsdsDataRearrangement - A JSON string that represents the splitting and rearrangement processing to be applied to a DataSource. If the DataRearrangement parameter is not provided, all of the input data is used to create the Datasource. There are multiple parameters that control what data is used to create a datasource: * percentBegin - Use percentBegin to indicate the beginning of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource. * percentEnd - Use percentEnd to indicate the end of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource. * complement - The complement parameter instructs Amazon ML to use the data that is not included in the range of percentBegin to percentEnd to create a datasource. The complement parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for percentBegin and percentEnd, along with the complement parameter. For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data. Datasource for evaluation: {"splitting":{"percentBegin":0, "percentEnd":25}} Datasource for training: {"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}} * strategy - To change how Amazon ML splits the data for a datasource, use the strategy parameter. The default value for the strategy parameter is sequential, meaning that Amazon ML takes all of the data records between the percentBegin and percentEnd parameters for the datasource, in the order that the records appear in the input data. The following two DataRearrangement lines are examples of sequentially ordered training and evaluation datasources: Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}} Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}} To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the strategy parameter to random and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between percentBegin and percentEnd. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in the cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records. The following two DataRearrangement lines are examples of non-sequentially ordered training and evaluation datasources: Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed"="s3:/my_s3_pathbucket/file.csv"}} Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed"="s3:/my_s3_pathbucket/file.csv", "complement":"true"}}
- rdsdsDatabaseInformation - Describes the DatabaseName and InstanceIdentifier of an Amazon RDS database.
- rdsdsSelectSqlQuery - The query that is used to retrieve the observation data for the DataSource.
- rdsdsDatabaseCredentials - The AWS Identity and Access Management (IAM) credentials that are used connect to the Amazon RDS database.
- rdsdsS3StagingLocation - The Amazon S3 location for staging Amazon RDS data. The data retrieved from Amazon RDS using SelectSqlQuery is stored in this location.
- rdsdsResourceRole - The role (DataPipelineDefaultResourceRole) assumed by an Amazon Elastic Compute Cloud (Amazon EC2) instance to carry out the copy operation from Amazon RDS to an Amazon S3 task. For more information, see Role templates for data pipelines.
- rdsdsServiceRole - The role (DataPipelineDefaultRole) assumed by AWS Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see Role templates for data pipelines.
- rdsdsSubnetId - The subnet ID to be used to access a VPC-based RDS DB instance. This attribute is used by Data Pipeline to carry out the copy task from Amazon RDS to Amazon S3.
- rdsdsSecurityGroupIds - The security group IDs to be used to access a VPC-based RDS DB instance. Ensure that there are appropriate ingress rules set up to allow access to the RDS DB instance. This attribute is used by Data Pipeline to carry out the copy operation from Amazon RDS to an Amazon S3 task.
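Putting the required pieces together, the following sketch builds an RDSDataSpec with the smart constructor. The positional arguments are assumed to follow the order of the required lenses listed above, and every identifier, role, query, and location below is a placeholder:

```haskell
{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (.~), (?~))
import Network.AWS.MachineLearning

-- Data specification for an RDS-backed DataSource; optional fields such as
-- the security group IDs and schema URI are filled in afterwards via lenses.
exampleRDSSpec :: RDSDataSpec
exampleRDSSpec =
  rdsDataSpec
    (rdsDatabase "my-rds-instance" "mydatabase")            -- assumed order: instance identifier, database name
    "SELECT * FROM observations"                            -- rdsdsSelectSqlQuery
    (rdsDatabaseCredentials "ml_user" "example-password")   -- assumed order: username, password
    "s3://my-bucket/staging/"                               -- rdsdsS3StagingLocation
    "DataPipelineDefaultResourceRole"                       -- rdsdsResourceRole
    "DataPipelineDefaultRole"                               -- rdsdsServiceRole
    "subnet-0123456789abcdef0"                              -- rdsdsSubnetId
    & rdsdsSecurityGroupIds .~ ["sg-0123456789abcdef0"]
    & rdsdsDataSchemaURI    ?~ "s3://my-bucket/schema.json"
```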
rdsdsDataSchemaURI :: Lens' RDSDataSpec (Maybe Text) Source #
The Amazon S3 location of the DataSchema .
rdsdsDataSchema :: Lens' RDSDataSpec (Maybe Text) Source #
A JSON string that represents the schema for an Amazon RDS DataSource . The DataSchema defines the structure of the observation data in the data file(s) referenced in the DataSource . A DataSchema is not required if you specify a DataSchemaUri Define your DataSchema as a series of key-value pairs. attributes and excludedVariableNames have an array of key-value pairs for their value. Use the following format to define your DataSchema . { "version": "1.0", "recordAnnotationFieldName": F1, "recordWeightFieldName": F2, "targetFieldName": F3, "dataFormat": CSV, "dataFileContainsHeader": true, "attributes": [ { "fieldName": F1, "fieldType": TEXT }, { "fieldName": F2, "fieldType": NUMERIC }, { "fieldName": F3, "fieldType": CATEGORICAL }, { "fieldName": F4, "fieldType": NUMERIC }, { "fieldName": F5, "fieldType": CATEGORICAL }, { "fieldName": F6, "fieldType": TEXT }, { "fieldName": F7, "fieldType": WEIGHTED_INT_SEQUENCE }, { "fieldName": F8, "fieldType": WEIGHTED_STRING_SEQUENCE } ], "excludedVariableNames": [ F6 ] }
rdsdsDataRearrangement :: Lens' RDSDataSpec (Maybe Text) Source #
A JSON string that represents the splitting and rearrangement processing to be applied to a DataSource. If the DataRearrangement parameter is not provided, all of the input data is used to create the Datasource.

There are multiple parameters that control what data is used to create a datasource:

- percentBegin - Use percentBegin to indicate the beginning of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource.
- percentEnd - Use percentEnd to indicate the end of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource.
- complement - The complement parameter instructs Amazon ML to use the data that is not included in the range of percentBegin to percentEnd to create a datasource. The complement parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for percentBegin and percentEnd, along with the complement parameter. For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has the remaining 75 percent. Datasource for evaluation: {"splitting":{"percentBegin":0, "percentEnd":25}} Datasource for training: {"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}
- strategy - To change how Amazon ML splits the data for a datasource, use the strategy parameter. The default value for the strategy parameter is sequential, meaning that Amazon ML takes all of the data records between the percentBegin and percentEnd parameters for the datasource, in the order that the records appear in the input data. The following two DataRearrangement lines are examples of sequentially ordered training and evaluation datasources: Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}} Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}} To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the strategy parameter to random and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between percentBegin and percentEnd. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records. The following two DataRearrangement lines are examples of non-sequentially ordered training and evaluation datasources: Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}} Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}
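As a concrete illustration of the complement pattern above, the following minimal sketch shows a complementary pair of DataRearrangement strings and how one might attach them through rdsdsDataRearrangement. The string contents and the withSplit helper are illustrative only, not part of the generated API.

```haskell
{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (?~))
import Data.Text (Text)
import Network.AWS.MachineLearning

-- A complementary pair of splitting specifications: the evaluation
-- datasource takes the first 25 percent of the records and the training
-- datasource takes the remaining 75 percent via "complement".
evaluationSplit, trainingSplit :: Text
evaluationSplit = "{\"splitting\":{\"percentBegin\":0, \"percentEnd\":25}}"
trainingSplit   = "{\"splitting\":{\"percentBegin\":0, \"percentEnd\":25, \"complement\":\"true\"}}"

-- Attach one of the strings to an existing RDSDataSpec; (?~) wraps the
-- value in Just because the field is optional.
withSplit :: Text -> RDSDataSpec -> RDSDataSpec
withSplit split spec = spec & rdsdsDataRearrangement ?~ split
```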
rdsdsDatabaseInformation :: Lens' RDSDataSpec RDSDatabase Source #
Describes the DatabaseName and InstanceIdentifier of an Amazon RDS database.
rdsdsSelectSqlQuery :: Lens' RDSDataSpec Text Source #
The query that is used to retrieve the observation data for the DataSource .
rdsdsDatabaseCredentials :: Lens' RDSDataSpec RDSDatabaseCredentials Source #
The AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon RDS database.
rdsdsS3StagingLocation :: Lens' RDSDataSpec Text Source #
The Amazon S3 location for staging Amazon RDS data. The data retrieved from Amazon RDS using SelectSqlQuery is stored in this location.
rdsdsResourceRole :: Lens' RDSDataSpec Text Source #
The role (DataPipelineDefaultResourceRole) assumed by an Amazon Elastic Compute Cloud (Amazon EC2) instance to carry out the copy task from Amazon RDS to Amazon S3. For more information, see Role templates for data pipelines.
rdsdsServiceRole :: Lens' RDSDataSpec Text Source #
The role (DataPipelineDefaultRole) assumed by AWS Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see Role templates for data pipelines.
rdsdsSubnetId :: Lens' RDSDataSpec Text Source #
The subnet ID to be used to access a VPC-based RDS DB instance. This attribute is used by Data Pipeline to carry out the copy task from Amazon RDS to Amazon S3.
rdsdsSecurityGroupIds :: Lens' RDSDataSpec [Text] Source #
The security group IDs to be used to access a VPC-based RDS DB instance. Ensure that there are appropriate ingress rules set up to allow access to the RDS DB instance. This attribute is used by Data Pipeline to carry out the copy task from Amazon RDS to Amazon S3.
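The VPC-related fields above are plain lenses on RDSDataSpec, so they can be adjusted after construction. A minimal sketch, assuming an already-built spec; the subnet and security group identifiers are placeholders and the inMyVpc name is illustrative.

```haskell
{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (.~))
import Network.AWS.MachineLearning

-- Point an existing RDSDataSpec at the subnet and security groups that
-- allow Data Pipeline to reach a VPC-based RDS DB instance.
inMyVpc :: RDSDataSpec -> RDSDataSpec
inMyVpc spec =
  spec & rdsdsSubnetId         .~ "subnet-0example"
       & rdsdsSecurityGroupIds .~ ["sg-0example"]
```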
RDSDatabase
data RDSDatabase Source #
The database details of an Amazon RDS database.
See: rdsDatabase smart constructor.
Instances
rdsDatabase Source #
Arguments
| :: Text | |
| -> Text | |
| -> RDSDatabase |
Creates a value of RDSDatabase with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
- rdsdInstanceIdentifier - The ID of an RDS DB instance.
- rdsdDatabaseName - Undocumented member.
rdsdInstanceIdentifier :: Lens' RDSDatabase Text Source #
The ID of an RDS DB instance.
rdsdDatabaseName :: Lens' RDSDatabase Text Source #
Undocumented member.
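A minimal construction sketch, assuming the two Text arguments follow the lens order listed above (instance identifier first, then database name); the identifiers are placeholders.

```haskell
{-# LANGUAGE OverloadedStrings #-}

import Network.AWS.MachineLearning

-- Identify the RDS DB instance and the database on it that holds the
-- observation data.
myRDSDatabase :: RDSDatabase
myRDSDatabase = rdsDatabase "my-rds-instance" "observations"
```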
RDSDatabaseCredentials
data RDSDatabaseCredentials Source #
The database credentials to connect to a database on an RDS DB instance.
See: rdsDatabaseCredentials smart constructor.
Instances
rdsDatabaseCredentials Source #
Arguments
| :: Text | |
| -> Text | |
| -> RDSDatabaseCredentials |
Creates a value of RDSDatabaseCredentials with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
- rdsdcUsername - Undocumented member.
- rdsdcPassword - Undocumented member.
rdsdcUsername :: Lens' RDSDatabaseCredentials Text Source #
Undocumented member.
rdsdcPassword :: Lens' RDSDatabaseCredentials Text Source #
Undocumented member.
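Since the username and password travel with the request, it is usually preferable to read them from the environment rather than hard-code them. A hedged sketch, assuming the constructor's arguments follow the lens order above (username, then password); the environment variable names are arbitrary.

```haskell
import qualified Data.Text as T
import Network.AWS.MachineLearning
import System.Environment (getEnv)

-- Build the credentials from environment variables instead of literals.
loadRDSCredentials :: IO RDSDatabaseCredentials
loadRDSCredentials = do
  user <- getEnv "RDS_USERNAME"
  pass <- getEnv "RDS_PASSWORD"
  pure (rdsDatabaseCredentials (T.pack user) (T.pack pass))
```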
RDSMetadata
data RDSMetadata Source #
The datasource details that are specific to Amazon RDS.
See: rdsMetadata smart constructor.
rdsMetadata :: RDSMetadata Source #
Creates a value of RDSMetadata with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
- rmSelectSqlQuery - The SQL query that is supplied during CreateDataSourceFromRDS. Returns only if Verbose is true in GetDataSourceInput.
- rmDataPipelineId - The ID of the Data Pipeline instance that is used to carry out the copy of data from Amazon RDS to Amazon S3. You can use the ID to find details about the instance in the Data Pipeline console.
- rmDatabase - The database details required to connect to an Amazon RDS database.
- rmDatabaseUserName - Undocumented member.
- rmResourceRole - The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance to carry out the copy task from Amazon RDS to Amazon S3. For more information, see Role templates for data pipelines.
- rmServiceRole - The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see Role templates for data pipelines.
rmSelectSqlQuery :: Lens' RDSMetadata (Maybe Text) Source #
The SQL query that is supplied during CreateDataSourceFromRDS . Returns only if Verbose is true in GetDataSourceInput .
rmDataPipelineId :: Lens' RDSMetadata (Maybe Text) Source #
The ID of the Data Pipeline instance that is used to carry out the copy of data from Amazon RDS to Amazon S3. You can use the ID to find details about the instance in the Data Pipeline console.
rmDatabase :: Lens' RDSMetadata (Maybe RDSDatabase) Source #
The database details required to connect to an Amazon RDS database.
rmDatabaseUserName :: Lens' RDSMetadata (Maybe Text) Source #
Undocumented member.
rmResourceRole :: Lens' RDSMetadata (Maybe Text) Source #
The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance to carry out the copy task from Amazon RDS to Amazon S3. For more information, see Role templates for data pipelines.
rmServiceRole :: Lens' RDSMetadata (Maybe Text) Source #
The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see Role templates for data pipelines.
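RDSMetadata is normally returned by GetDataSource rather than built by hand, but because rdsMetadata starts with every field set to Nothing it is easy to fabricate values, for example in tests. A sketch; all field values are placeholders.

```haskell
{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (?~))
import Network.AWS.MachineLearning

-- A fabricated RDSMetadata value, e.g. for unit tests that inspect
-- GetDataSource responses. Optional fields are filled in with (?~).
sampleRDSMetadata :: RDSMetadata
sampleRDSMetadata =
  rdsMetadata
    & rmDatabase       ?~ rdsDatabase "my-rds-instance" "observations"
    & rmDataPipelineId ?~ "df-00000000EXAMPLE"
    & rmSelectSqlQuery ?~ "SELECT * FROM observations"
```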
RealtimeEndpointInfo
data RealtimeEndpointInfo Source #
Describes the real-time endpoint information for an MLModel .
See: realtimeEndpointInfo smart constructor.
Instances
realtimeEndpointInfo :: RealtimeEndpointInfo Source #
Creates a value of RealtimeEndpointInfo with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
- reiCreatedAt - The time that the request to create the real-time endpoint for the MLModel was received. The time is expressed in epoch time.
- reiEndpointURL - The URI that specifies where to send real-time prediction requests for the MLModel.
- reiEndpointStatus - The current status of the real-time endpoint for the MLModel. This element can have one of the following values: * NONE - Endpoint does not exist or was previously deleted. * READY - Endpoint is ready to be used for real-time predictions. * UPDATING - Updating/creating the endpoint.
- reiPeakRequestsPerSecond - The maximum processing rate for the real-time endpoint for MLModel, measured in incoming requests per second.
reiCreatedAt :: Lens' RealtimeEndpointInfo (Maybe UTCTime) Source #
The time that the request to create the real-time endpoint for the MLModel was received. The time is expressed in epoch time.
reiEndpointURL :: Lens' RealtimeEndpointInfo (Maybe Text) Source #
The URI that specifies where to send real-time prediction requests for the MLModel .
reiEndpointStatus :: Lens' RealtimeEndpointInfo (Maybe RealtimeEndpointStatus) Source #
The current status of the real-time endpoint for the MLModel . This element can have one of the following values: * NONE - Endpoint does not exist or was previously deleted. * READY - Endpoint is ready to be used for real-time predictions. * UPDATING - Updating/creating the endpoint.
reiPeakRequestsPerSecond :: Lens' RealtimeEndpointInfo (Maybe Int) Source #
The maximum processing rate for the real-time endpoint for MLModel , measured in incoming requests per second.
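RealtimeEndpointInfo is likewise a response-side record. The sketch below fabricates one and reads its optional fields back with (^.); the endpoint URL and request rate are placeholders.

```haskell
{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (?~), (^.))
import Data.Text (Text)
import Network.AWS.MachineLearning

sampleEndpoint :: RealtimeEndpointInfo
sampleEndpoint =
  realtimeEndpointInfo
    & reiEndpointURL           ?~ "https://realtime.machinelearning.us-east-1.amazonaws.com"
    & reiPeakRequestsPerSecond ?~ 200

-- Reading optional fields back yields Maybe values.
endpointSummary :: (Maybe Text, Maybe Int)
endpointSummary =
  ( sampleEndpoint ^. reiEndpointURL
  , sampleEndpoint ^. reiPeakRequestsPerSecond
  )
```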
RedshiftDataSpec
data RedshiftDataSpec Source #
Describes the data specification of an Amazon Redshift DataSource .
See: redshiftDataSpec smart constructor.
Instances
redshiftDataSpec Source #
Arguments
| :: RedshiftDatabase | |
| -> Text | |
| -> RedshiftDatabaseCredentials | |
| -> Text | |
| -> RedshiftDataSpec |
Creates a value of RedshiftDataSpec with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
- rDataSchemaURI - Describes the schema location for an Amazon Redshift DataSource.
- rDataSchema - A JSON string that represents the schema for an Amazon Redshift DataSource. The DataSchema defines the structure of the observation data in the data file(s) referenced in the DataSource. A DataSchema is not required if you specify a DataSchemaUri. Define your DataSchema as a series of key-value pairs. attributes and excludedVariableNames have an array of key-value pairs for their value. Use the following format to define your DataSchema: { "version": "1.0", "recordAnnotationFieldName": F1, "recordWeightFieldName": F2, "targetFieldName": F3, "dataFormat": CSV, "dataFileContainsHeader": true, "attributes": [ { "fieldName": F1, "fieldType": TEXT }, { "fieldName": F2, "fieldType": NUMERIC }, { "fieldName": F3, "fieldType": CATEGORICAL }, { "fieldName": F4, "fieldType": NUMERIC }, { "fieldName": F5, "fieldType": CATEGORICAL }, { "fieldName": F6, "fieldType": TEXT }, { "fieldName": F7, "fieldType": WEIGHTED_INT_SEQUENCE }, { "fieldName": F8, "fieldType": WEIGHTED_STRING_SEQUENCE } ], "excludedVariableNames": [ F6 ] }
- rDataRearrangement - A JSON string that represents the splitting and rearrangement processing to be applied to a DataSource. If the DataRearrangement parameter is not provided, all of the input data is used to create the Datasource. There are multiple parameters that control what data is used to create a datasource: * percentBegin - Use percentBegin to indicate the beginning of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource. * percentEnd - Use percentEnd to indicate the end of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource. * complement - The complement parameter instructs Amazon ML to use the data that is not included in the range of percentBegin to percentEnd to create a datasource. The complement parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for percentBegin and percentEnd, along with the complement parameter. For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has the remaining 75 percent. Datasource for evaluation: {"splitting":{"percentBegin":0, "percentEnd":25}} Datasource for training: {"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}} * strategy - To change how Amazon ML splits the data for a datasource, use the strategy parameter. The default value for the strategy parameter is sequential, meaning that Amazon ML takes all of the data records between the percentBegin and percentEnd parameters for the datasource, in the order that the records appear in the input data. The following two DataRearrangement lines are examples of sequentially ordered training and evaluation datasources: Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}} Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}} To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the strategy parameter to random and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between percentBegin and percentEnd. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records. The following two DataRearrangement lines are examples of non-sequentially ordered training and evaluation datasources: Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}} Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}
- rDatabaseInformation - Describes the DatabaseName and ClusterIdentifier for an Amazon Redshift DataSource.
- rSelectSqlQuery - Describes the SQL Query to execute on an Amazon Redshift database for an Amazon Redshift DataSource.
- rDatabaseCredentials - Describes AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon Redshift database.
- rS3StagingLocation - Describes an Amazon S3 location to store the result set of the SelectSqlQuery query.
rDataSchemaURI :: Lens' RedshiftDataSpec (Maybe Text) Source #
Describes the schema location for an Amazon Redshift DataSource .
rDataSchema :: Lens' RedshiftDataSpec (Maybe Text) Source #
A JSON string that represents the schema for an Amazon Redshift DataSource . The DataSchema defines the structure of the observation data in the data file(s) referenced in the DataSource . A DataSchema is not required if you specify a DataSchemaUri . Define your DataSchema as a series of key-value pairs. attributes and excludedVariableNames have an array of key-value pairs for their value. Use the following format to define your DataSchema . { "version": "1.0", "recordAnnotationFieldName": F1, "recordWeightFieldName": F2, "targetFieldName": F3, "dataFormat": CSV, "dataFileContainsHeader": true, "attributes": [ { "fieldName": F1, "fieldType": TEXT }, { "fieldName": F2, "fieldType": NUMERIC }, { "fieldName": F3, "fieldType": CATEGORICAL }, { "fieldName": F4, "fieldType": NUMERIC }, { "fieldName": F5, "fieldType": CATEGORICAL }, { "fieldName": F6, "fieldType": TEXT }, { "fieldName": F7, "fieldType": WEIGHTED_INT_SEQUENCE }, { "fieldName": F8, "fieldType": WEIGHTED_STRING_SEQUENCE } ], "excludedVariableNames": [ F6 ] }
rDataRearrangement :: Lens' RedshiftDataSpec (Maybe Text) Source #
A JSON string that represents the splitting and rearrangement processing to be applied to a DataSource. If the DataRearrangement parameter is not provided, all of the input data is used to create the Datasource.

There are multiple parameters that control what data is used to create a datasource:

- percentBegin - Use percentBegin to indicate the beginning of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource.
- percentEnd - Use percentEnd to indicate the end of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource.
- complement - The complement parameter instructs Amazon ML to use the data that is not included in the range of percentBegin to percentEnd to create a datasource. The complement parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for percentBegin and percentEnd, along with the complement parameter. For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has the remaining 75 percent. Datasource for evaluation: {"splitting":{"percentBegin":0, "percentEnd":25}} Datasource for training: {"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}
- strategy - To change how Amazon ML splits the data for a datasource, use the strategy parameter. The default value for the strategy parameter is sequential, meaning that Amazon ML takes all of the data records between the percentBegin and percentEnd parameters for the datasource, in the order that the records appear in the input data. The following two DataRearrangement lines are examples of sequentially ordered training and evaluation datasources: Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}} Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}} To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the strategy parameter to random and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between percentBegin and percentEnd. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records. The following two DataRearrangement lines are examples of non-sequentially ordered training and evaluation datasources: Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}} Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}
rDatabaseInformation :: Lens' RedshiftDataSpec RedshiftDatabase Source #
Describes the DatabaseName and ClusterIdentifier for an Amazon Redshift DataSource .
rSelectSqlQuery :: Lens' RedshiftDataSpec Text Source #
Describes the SQL Query to execute on an Amazon Redshift database for an Amazon Redshift DataSource .
rDatabaseCredentials :: Lens' RedshiftDataSpec RedshiftDatabaseCredentials Source #
Describes AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon Redshift database.
rS3StagingLocation :: Lens' RedshiftDataSpec Text Source #
Describes an Amazon S3 location to store the result set of the SelectSqlQuery query.
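Putting the required pieces together, here is a hedged end-to-end sketch of building a RedshiftDataSpec. The constructor arguments are assumed to follow the order in the Arguments table above (database, query, credentials, S3 staging location), redshiftDatabase is the smart constructor described in the next section, and every identifier, query, and bucket name is a placeholder.

```haskell
{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (?~))
import Network.AWS.MachineLearning

-- Describe where the observation data lives and how Amazon ML should
-- stage it before training.
myRedshiftSpec :: RedshiftDatabaseCredentials -> RedshiftDataSpec
myRedshiftSpec creds =
  redshiftDataSpec
    (redshiftDatabase "analytics" "my-redshift-cluster") -- database name, cluster identifier
    "SELECT target, f1, f2 FROM observations"            -- SelectSqlQuery
    creds                                                 -- IAM database credentials
    "s3://my-staging-bucket/amazon-ml/"                   -- S3 staging location
    -- Optionally point at a schema file instead of embedding the DataSchema.
    & rDataSchemaURI ?~ "s3://my-staging-bucket/amazon-ml/observations.schema"
```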
RedshiftDatabase
data RedshiftDatabase Source #
Describes the database details required to connect to an Amazon Redshift database.
See: redshiftDatabase smart constructor.
Instances
redshiftDatabase Source #
Arguments
| :: Text | |
| -> Text | |
| -> RedshiftDatabase |
Creates a value of RedshiftDatabase with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
- rdDatabaseName - Undocumented member.
- rdClusterIdentifier - Undocumented member.
rdDatabaseName :: Lens' RedshiftDatabase Text Source #
Undocumented member.
rdClusterIdentifier :: Lens' RedshiftDatabase Text Source #
Undocumented member.
RedshiftDatabaseCredentials
data RedshiftDatabaseCredentials Source #
Describes the database credentials for connecting to a database on an Amazon Redshift cluster.
See: redshiftDatabaseCredentials smart constructor.
Instances
redshiftDatabaseCredentials Source #
Arguments
| :: Text | |
| -> Text | |
| -> RedshiftDatabaseCredentials |
Creates a value of RedshiftDatabaseCredentials with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
- rdcUsername - Undocumented member.
- rdcPassword - Undocumented member.
rdcUsername :: Lens' RedshiftDatabaseCredentials Text Source #
Undocumented member.
rdcPassword :: Lens' RedshiftDatabaseCredentials Text Source #
Undocumented member.
RedshiftMetadata
data RedshiftMetadata Source #
Describes the DataSource details specific to Amazon Redshift.
See: redshiftMetadata smart constructor.
Instances
redshiftMetadata :: RedshiftMetadata Source #
Creates a value of RedshiftMetadata with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
- redSelectSqlQuery - The SQL query that is specified during CreateDataSourceFromRedshift. Returns only if Verbose is true in GetDataSourceInput.
- redRedshiftDatabase - Undocumented member.
- redDatabaseUserName - Undocumented member.
redSelectSqlQuery :: Lens' RedshiftMetadata (Maybe Text) Source #
The SQL query that is specified during CreateDataSourceFromRedshift . Returns only if Verbose is true in GetDataSourceInput.
redRedshiftDatabase :: Lens' RedshiftMetadata (Maybe RedshiftDatabase) Source #
Undocumented member.
redDatabaseUserName :: Lens' RedshiftMetadata (Maybe Text) Source #
Undocumented member.
S3DataSpec
data S3DataSpec Source #
Describes the data specification of a DataSource .
See: s3DataSpec smart constructor.
Instances
s3DataSpec Source #
Arguments
| :: Text | |
| -> S3DataSpec |
Creates a value of S3DataSpec with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
- sdsDataSchema - A JSON string that represents the schema for an Amazon S3 DataSource. The DataSchema defines the structure of the observation data in the data file(s) referenced in the DataSource. You must provide either the DataSchema or the DataSchemaLocationS3. Define your DataSchema as a series of key-value pairs. attributes and excludedVariableNames have an array of key-value pairs for their value. Use the following format to define your DataSchema: { "version": "1.0", "recordAnnotationFieldName": F1, "recordWeightFieldName": F2, "targetFieldName": F3, "dataFormat": CSV, "dataFileContainsHeader": true, "attributes": [ { "fieldName": F1, "fieldType": TEXT }, { "fieldName": F2, "fieldType": NUMERIC }, { "fieldName": F3, "fieldType": CATEGORICAL }, { "fieldName": F4, "fieldType": NUMERIC }, { "fieldName": F5, "fieldType": CATEGORICAL }, { "fieldName": F6, "fieldType": TEXT }, { "fieldName": F7, "fieldType": WEIGHTED_INT_SEQUENCE }, { "fieldName": F8, "fieldType": WEIGHTED_STRING_SEQUENCE } ], "excludedVariableNames": [ F6 ] }
- sdsDataSchemaLocationS3 - Describes the schema location in Amazon S3. You must provide either the DataSchema or the DataSchemaLocationS3.
- sdsDataRearrangement - A JSON string that represents the splitting and rearrangement processing to be applied to a DataSource. If the DataRearrangement parameter is not provided, all of the input data is used to create the Datasource. There are multiple parameters that control what data is used to create a datasource: * percentBegin - Use percentBegin to indicate the beginning of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource. * percentEnd - Use percentEnd to indicate the end of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource. * complement - The complement parameter instructs Amazon ML to use the data that is not included in the range of percentBegin to percentEnd to create a datasource. The complement parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for percentBegin and percentEnd, along with the complement parameter. For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has the remaining 75 percent. Datasource for evaluation: {"splitting":{"percentBegin":0, "percentEnd":25}} Datasource for training: {"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}} * strategy - To change how Amazon ML splits the data for a datasource, use the strategy parameter. The default value for the strategy parameter is sequential, meaning that Amazon ML takes all of the data records between the percentBegin and percentEnd parameters for the datasource, in the order that the records appear in the input data. The following two DataRearrangement lines are examples of sequentially ordered training and evaluation datasources: Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}} Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}} To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the strategy parameter to random and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between percentBegin and percentEnd. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records. The following two DataRearrangement lines are examples of non-sequentially ordered training and evaluation datasources: Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}} Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}
- sdsDataLocationS3 - The location of the data file(s) used by a DataSource. The URI specifies a data file or an Amazon Simple Storage Service (Amazon S3) directory or bucket containing data files.
sdsDataSchema :: Lens' S3DataSpec (Maybe Text) Source #
A JSON string that represents the schema for an Amazon S3 DataSource . The DataSchema defines the structure of the observation data in the data file(s) referenced in the DataSource . You must provide either the DataSchema or the DataSchemaLocationS3 . Define your DataSchema as a series of key-value pairs. attributes and excludedVariableNames have an array of key-value pairs for their value. Use the following format to define your DataSchema . { "version": "1.0", "recordAnnotationFieldName": F1, "recordWeightFieldName": F2, "targetFieldName": F3, "dataFormat": CSV, "dataFileContainsHeader": true, "attributes": [ { "fieldName": F1, "fieldType": TEXT }, { "fieldName": F2, "fieldType": NUMERIC }, { "fieldName": F3, "fieldType": CATEGORICAL }, { "fieldName": F4, "fieldType": NUMERIC }, { "fieldName": F5, "fieldType": CATEGORICAL }, { "fieldName": F6, "fieldType": TEXT }, { "fieldName": F7, "fieldType": WEIGHTED_INT_SEQUENCE }, { "fieldName": F8, "fieldType": WEIGHTED_STRING_SEQUENCE } ], "excludedVariableNames": [ F6 ] }
sdsDataSchemaLocationS3 :: Lens' S3DataSpec (Maybe Text) Source #
Describes the schema location in Amazon S3. You must provide either the DataSchema or the DataSchemaLocationS3 .
sdsDataRearrangement :: Lens' S3DataSpec (Maybe Text) Source #
A JSON string that represents the splitting and rearrangement processing to be applied to a DataSource. If the DataRearrangement parameter is not provided, all of the input data is used to create the Datasource.

There are multiple parameters that control what data is used to create a datasource:

- percentBegin - Use percentBegin to indicate the beginning of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource.
- percentEnd - Use percentEnd to indicate the end of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource.
- complement - The complement parameter instructs Amazon ML to use the data that is not included in the range of percentBegin to percentEnd to create a datasource. The complement parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for percentBegin and percentEnd, along with the complement parameter. For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has the remaining 75 percent. Datasource for evaluation: {"splitting":{"percentBegin":0, "percentEnd":25}} Datasource for training: {"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}
- strategy - To change how Amazon ML splits the data for a datasource, use the strategy parameter. The default value for the strategy parameter is sequential, meaning that Amazon ML takes all of the data records between the percentBegin and percentEnd parameters for the datasource, in the order that the records appear in the input data. The following two DataRearrangement lines are examples of sequentially ordered training and evaluation datasources: Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}} Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}} To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the strategy parameter to random and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between percentBegin and percentEnd. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records. The following two DataRearrangement lines are examples of non-sequentially ordered training and evaluation datasources: Datasource for evaluation: {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}} Datasource for training: {"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}
sdsDataLocationS3 :: Lens' S3DataSpec Text Source #
The location of the data file(s) used by a DataSource . The URI specifies a data file or an Amazon Simple Storage Service (Amazon S3) directory or bucket containing data files.
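A minimal S3DataSpec sketch: the only required field is the data location, with the schema location and a sequential training split added through the optional lenses. Bucket names and keys are placeholders.

```haskell
{-# LANGUAGE OverloadedStrings #-}

import Control.Lens ((&), (?~))
import Network.AWS.MachineLearning

-- Point the datasource at a file (or prefix) in S3, reference a schema
-- file, and keep the first 70 percent of the records.
myS3Spec :: S3DataSpec
myS3Spec =
  s3DataSpec "s3://my-bucket/observations.csv"
    & sdsDataSchemaLocationS3 ?~ "s3://my-bucket/observations.csv.schema"
    & sdsDataRearrangement    ?~ "{\"splitting\":{\"percentBegin\":0, \"percentEnd\":70}}"
```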
Tag
data Tag Source #
A custom key-value pair associated with an ML object, such as an ML model.
See: tag smart constructor.
Creates a value of Tag with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired: