| Copyright | (c) 2013-2023 Brendan Hay |
|---|---|
| License | Mozilla Public License, v. 2.0. |
| Maintainer | Brendan Hay |
| Stability | auto-generated |
| Portability | non-portable (GHC extensions) |
| Safe Haskell | Safe-Inferred |
| Language | Haskell2010 |
Amazonka.MachineLearning.Types
Contents
- Service Configuration
- Errors
- Algorithm
- BatchPredictionFilterVariable
- DataSourceFilterVariable
- DetailsAttributes
- EntityStatus
- EvaluationFilterVariable
- MLModelFilterVariable
- MLModelType
- RealtimeEndpointStatus
- SortOrder
- TaggableResourceType
- BatchPrediction
- DataSource
- Evaluation
- MLModel
- PerformanceMetrics
- Prediction
- RDSDataSpec
- RDSDatabase
- RDSDatabaseCredentials
- RDSMetadata
- RealtimeEndpointInfo
- RedshiftDataSpec
- RedshiftDatabase
- RedshiftDatabaseCredentials
- RedshiftMetadata
- S3DataSpec
- Tag
Description
Synopsis
- defaultService :: Service
- _IdempotentParameterMismatchException :: AsError a => Fold a ServiceError
- _InternalServerException :: AsError a => Fold a ServiceError
- _InvalidInputException :: AsError a => Fold a ServiceError
- _InvalidTagException :: AsError a => Fold a ServiceError
- _LimitExceededException :: AsError a => Fold a ServiceError
- _PredictorNotMountedException :: AsError a => Fold a ServiceError
- _ResourceNotFoundException :: AsError a => Fold a ServiceError
- _TagLimitExceededException :: AsError a => Fold a ServiceError
- newtype Algorithm where
- Algorithm' { }
- pattern Algorithm_Sgd :: Algorithm
- newtype BatchPredictionFilterVariable where
- BatchPredictionFilterVariable' { }
- pattern BatchPredictionFilterVariable_CreatedAt :: BatchPredictionFilterVariable
- pattern BatchPredictionFilterVariable_DataSourceId :: BatchPredictionFilterVariable
- pattern BatchPredictionFilterVariable_DataURI :: BatchPredictionFilterVariable
- pattern BatchPredictionFilterVariable_IAMUser :: BatchPredictionFilterVariable
- pattern BatchPredictionFilterVariable_LastUpdatedAt :: BatchPredictionFilterVariable
- pattern BatchPredictionFilterVariable_MLModelId :: BatchPredictionFilterVariable
- pattern BatchPredictionFilterVariable_Name :: BatchPredictionFilterVariable
- pattern BatchPredictionFilterVariable_Status :: BatchPredictionFilterVariable
- newtype DataSourceFilterVariable where
- DataSourceFilterVariable' { }
- pattern DataSourceFilterVariable_CreatedAt :: DataSourceFilterVariable
- pattern DataSourceFilterVariable_DataLocationS3 :: DataSourceFilterVariable
- pattern DataSourceFilterVariable_IAMUser :: DataSourceFilterVariable
- pattern DataSourceFilterVariable_LastUpdatedAt :: DataSourceFilterVariable
- pattern DataSourceFilterVariable_Name :: DataSourceFilterVariable
- pattern DataSourceFilterVariable_Status :: DataSourceFilterVariable
- newtype DetailsAttributes where
- newtype EntityStatus where
- EntityStatus' { }
- pattern EntityStatus_COMPLETED :: EntityStatus
- pattern EntityStatus_DELETED :: EntityStatus
- pattern EntityStatus_FAILED :: EntityStatus
- pattern EntityStatus_INPROGRESS :: EntityStatus
- pattern EntityStatus_PENDING :: EntityStatus
- newtype EvaluationFilterVariable where
- EvaluationFilterVariable' { }
- pattern EvaluationFilterVariable_CreatedAt :: EvaluationFilterVariable
- pattern EvaluationFilterVariable_DataSourceId :: EvaluationFilterVariable
- pattern EvaluationFilterVariable_DataURI :: EvaluationFilterVariable
- pattern EvaluationFilterVariable_IAMUser :: EvaluationFilterVariable
- pattern EvaluationFilterVariable_LastUpdatedAt :: EvaluationFilterVariable
- pattern EvaluationFilterVariable_MLModelId :: EvaluationFilterVariable
- pattern EvaluationFilterVariable_Name :: EvaluationFilterVariable
- pattern EvaluationFilterVariable_Status :: EvaluationFilterVariable
- newtype MLModelFilterVariable where
- MLModelFilterVariable' { }
- pattern MLModelFilterVariable_Algorithm :: MLModelFilterVariable
- pattern MLModelFilterVariable_CreatedAt :: MLModelFilterVariable
- pattern MLModelFilterVariable_IAMUser :: MLModelFilterVariable
- pattern MLModelFilterVariable_LastUpdatedAt :: MLModelFilterVariable
- pattern MLModelFilterVariable_MLModelType :: MLModelFilterVariable
- pattern MLModelFilterVariable_Name :: MLModelFilterVariable
- pattern MLModelFilterVariable_RealtimeEndpointStatus :: MLModelFilterVariable
- pattern MLModelFilterVariable_Status :: MLModelFilterVariable
- pattern MLModelFilterVariable_TrainingDataSourceId :: MLModelFilterVariable
- pattern MLModelFilterVariable_TrainingDataURI :: MLModelFilterVariable
- newtype MLModelType where
- MLModelType' { }
- pattern MLModelType_BINARY :: MLModelType
- pattern MLModelType_MULTICLASS :: MLModelType
- pattern MLModelType_REGRESSION :: MLModelType
- newtype RealtimeEndpointStatus where
- newtype SortOrder where
- SortOrder' { }
- pattern SortOrder_Asc :: SortOrder
- pattern SortOrder_Dsc :: SortOrder
- newtype TaggableResourceType where
- data BatchPrediction = BatchPrediction' {
- batchPredictionDataSourceId :: Maybe Text
- batchPredictionId :: Maybe Text
- computeTime :: Maybe Integer
- createdAt :: Maybe POSIX
- createdByIamUser :: Maybe Text
- finishedAt :: Maybe POSIX
- inputDataLocationS3 :: Maybe Text
- invalidRecordCount :: Maybe Integer
- lastUpdatedAt :: Maybe POSIX
- mLModelId :: Maybe Text
- message :: Maybe Text
- name :: Maybe Text
- outputUri :: Maybe Text
- startedAt :: Maybe POSIX
- status :: Maybe EntityStatus
- totalRecordCount :: Maybe Integer
- newBatchPrediction :: BatchPrediction
- batchPrediction_batchPredictionDataSourceId :: Lens' BatchPrediction (Maybe Text)
- batchPrediction_batchPredictionId :: Lens' BatchPrediction (Maybe Text)
- batchPrediction_computeTime :: Lens' BatchPrediction (Maybe Integer)
- batchPrediction_createdAt :: Lens' BatchPrediction (Maybe UTCTime)
- batchPrediction_createdByIamUser :: Lens' BatchPrediction (Maybe Text)
- batchPrediction_finishedAt :: Lens' BatchPrediction (Maybe UTCTime)
- batchPrediction_inputDataLocationS3 :: Lens' BatchPrediction (Maybe Text)
- batchPrediction_invalidRecordCount :: Lens' BatchPrediction (Maybe Integer)
- batchPrediction_lastUpdatedAt :: Lens' BatchPrediction (Maybe UTCTime)
- batchPrediction_mLModelId :: Lens' BatchPrediction (Maybe Text)
- batchPrediction_message :: Lens' BatchPrediction (Maybe Text)
- batchPrediction_name :: Lens' BatchPrediction (Maybe Text)
- batchPrediction_outputUri :: Lens' BatchPrediction (Maybe Text)
- batchPrediction_startedAt :: Lens' BatchPrediction (Maybe UTCTime)
- batchPrediction_status :: Lens' BatchPrediction (Maybe EntityStatus)
- batchPrediction_totalRecordCount :: Lens' BatchPrediction (Maybe Integer)
- data DataSource = DataSource' {
- computeStatistics :: Maybe Bool
- computeTime :: Maybe Integer
- createdAt :: Maybe POSIX
- createdByIamUser :: Maybe Text
- dataLocationS3 :: Maybe Text
- dataRearrangement :: Maybe Text
- dataSizeInBytes :: Maybe Integer
- dataSourceId :: Maybe Text
- finishedAt :: Maybe POSIX
- lastUpdatedAt :: Maybe POSIX
- message :: Maybe Text
- name :: Maybe Text
- numberOfFiles :: Maybe Integer
- rDSMetadata :: Maybe RDSMetadata
- redshiftMetadata :: Maybe RedshiftMetadata
- roleARN :: Maybe Text
- startedAt :: Maybe POSIX
- status :: Maybe EntityStatus
- newDataSource :: DataSource
- dataSource_computeStatistics :: Lens' DataSource (Maybe Bool)
- dataSource_computeTime :: Lens' DataSource (Maybe Integer)
- dataSource_createdAt :: Lens' DataSource (Maybe UTCTime)
- dataSource_createdByIamUser :: Lens' DataSource (Maybe Text)
- dataSource_dataLocationS3 :: Lens' DataSource (Maybe Text)
- dataSource_dataRearrangement :: Lens' DataSource (Maybe Text)
- dataSource_dataSizeInBytes :: Lens' DataSource (Maybe Integer)
- dataSource_dataSourceId :: Lens' DataSource (Maybe Text)
- dataSource_finishedAt :: Lens' DataSource (Maybe UTCTime)
- dataSource_lastUpdatedAt :: Lens' DataSource (Maybe UTCTime)
- dataSource_message :: Lens' DataSource (Maybe Text)
- dataSource_name :: Lens' DataSource (Maybe Text)
- dataSource_numberOfFiles :: Lens' DataSource (Maybe Integer)
- dataSource_rDSMetadata :: Lens' DataSource (Maybe RDSMetadata)
- dataSource_redshiftMetadata :: Lens' DataSource (Maybe RedshiftMetadata)
- dataSource_roleARN :: Lens' DataSource (Maybe Text)
- dataSource_startedAt :: Lens' DataSource (Maybe UTCTime)
- dataSource_status :: Lens' DataSource (Maybe EntityStatus)
- data Evaluation = Evaluation' {
- computeTime :: Maybe Integer
- createdAt :: Maybe POSIX
- createdByIamUser :: Maybe Text
- evaluationDataSourceId :: Maybe Text
- evaluationId :: Maybe Text
- finishedAt :: Maybe POSIX
- inputDataLocationS3 :: Maybe Text
- lastUpdatedAt :: Maybe POSIX
- mLModelId :: Maybe Text
- message :: Maybe Text
- name :: Maybe Text
- performanceMetrics :: Maybe PerformanceMetrics
- startedAt :: Maybe POSIX
- status :: Maybe EntityStatus
- newEvaluation :: Evaluation
- evaluation_computeTime :: Lens' Evaluation (Maybe Integer)
- evaluation_createdAt :: Lens' Evaluation (Maybe UTCTime)
- evaluation_createdByIamUser :: Lens' Evaluation (Maybe Text)
- evaluation_evaluationDataSourceId :: Lens' Evaluation (Maybe Text)
- evaluation_evaluationId :: Lens' Evaluation (Maybe Text)
- evaluation_finishedAt :: Lens' Evaluation (Maybe UTCTime)
- evaluation_inputDataLocationS3 :: Lens' Evaluation (Maybe Text)
- evaluation_lastUpdatedAt :: Lens' Evaluation (Maybe UTCTime)
- evaluation_mLModelId :: Lens' Evaluation (Maybe Text)
- evaluation_message :: Lens' Evaluation (Maybe Text)
- evaluation_name :: Lens' Evaluation (Maybe Text)
- evaluation_performanceMetrics :: Lens' Evaluation (Maybe PerformanceMetrics)
- evaluation_startedAt :: Lens' Evaluation (Maybe UTCTime)
- evaluation_status :: Lens' Evaluation (Maybe EntityStatus)
- data MLModel = MLModel' {
- algorithm :: Maybe Algorithm
- computeTime :: Maybe Integer
- createdAt :: Maybe POSIX
- createdByIamUser :: Maybe Text
- endpointInfo :: Maybe RealtimeEndpointInfo
- finishedAt :: Maybe POSIX
- inputDataLocationS3 :: Maybe Text
- lastUpdatedAt :: Maybe POSIX
- mLModelId :: Maybe Text
- mLModelType :: Maybe MLModelType
- message :: Maybe Text
- name :: Maybe Text
- scoreThreshold :: Maybe Double
- scoreThresholdLastUpdatedAt :: Maybe POSIX
- sizeInBytes :: Maybe Integer
- startedAt :: Maybe POSIX
- status :: Maybe EntityStatus
- trainingDataSourceId :: Maybe Text
- trainingParameters :: Maybe (HashMap Text Text)
- newMLModel :: MLModel
- mLModel_algorithm :: Lens' MLModel (Maybe Algorithm)
- mLModel_computeTime :: Lens' MLModel (Maybe Integer)
- mLModel_createdAt :: Lens' MLModel (Maybe UTCTime)
- mLModel_createdByIamUser :: Lens' MLModel (Maybe Text)
- mLModel_endpointInfo :: Lens' MLModel (Maybe RealtimeEndpointInfo)
- mLModel_finishedAt :: Lens' MLModel (Maybe UTCTime)
- mLModel_inputDataLocationS3 :: Lens' MLModel (Maybe Text)
- mLModel_lastUpdatedAt :: Lens' MLModel (Maybe UTCTime)
- mLModel_mLModelId :: Lens' MLModel (Maybe Text)
- mLModel_mLModelType :: Lens' MLModel (Maybe MLModelType)
- mLModel_message :: Lens' MLModel (Maybe Text)
- mLModel_name :: Lens' MLModel (Maybe Text)
- mLModel_scoreThreshold :: Lens' MLModel (Maybe Double)
- mLModel_scoreThresholdLastUpdatedAt :: Lens' MLModel (Maybe UTCTime)
- mLModel_sizeInBytes :: Lens' MLModel (Maybe Integer)
- mLModel_startedAt :: Lens' MLModel (Maybe UTCTime)
- mLModel_status :: Lens' MLModel (Maybe EntityStatus)
- mLModel_trainingDataSourceId :: Lens' MLModel (Maybe Text)
- mLModel_trainingParameters :: Lens' MLModel (Maybe (HashMap Text Text))
- data PerformanceMetrics = PerformanceMetrics' {
- properties :: Maybe (HashMap Text Text)
- newPerformanceMetrics :: PerformanceMetrics
- performanceMetrics_properties :: Lens' PerformanceMetrics (Maybe (HashMap Text Text))
- data Prediction = Prediction' {}
- newPrediction :: Prediction
- prediction_details :: Lens' Prediction (Maybe (HashMap DetailsAttributes Text))
- prediction_predictedLabel :: Lens' Prediction (Maybe Text)
- prediction_predictedScores :: Lens' Prediction (Maybe (HashMap Text Double))
- prediction_predictedValue :: Lens' Prediction (Maybe Double)
- data RDSDataSpec = RDSDataSpec' {}
- newRDSDataSpec :: RDSDatabase -> Text -> RDSDatabaseCredentials -> Text -> Text -> Text -> Text -> RDSDataSpec
- rDSDataSpec_dataRearrangement :: Lens' RDSDataSpec (Maybe Text)
- rDSDataSpec_dataSchema :: Lens' RDSDataSpec (Maybe Text)
- rDSDataSpec_dataSchemaUri :: Lens' RDSDataSpec (Maybe Text)
- rDSDataSpec_databaseInformation :: Lens' RDSDataSpec RDSDatabase
- rDSDataSpec_selectSqlQuery :: Lens' RDSDataSpec Text
- rDSDataSpec_databaseCredentials :: Lens' RDSDataSpec RDSDatabaseCredentials
- rDSDataSpec_s3StagingLocation :: Lens' RDSDataSpec Text
- rDSDataSpec_resourceRole :: Lens' RDSDataSpec Text
- rDSDataSpec_serviceRole :: Lens' RDSDataSpec Text
- rDSDataSpec_subnetId :: Lens' RDSDataSpec Text
- rDSDataSpec_securityGroupIds :: Lens' RDSDataSpec [Text]
- data RDSDatabase = RDSDatabase' {}
- newRDSDatabase :: Text -> Text -> RDSDatabase
- rDSDatabase_instanceIdentifier :: Lens' RDSDatabase Text
- rDSDatabase_databaseName :: Lens' RDSDatabase Text
- data RDSDatabaseCredentials = RDSDatabaseCredentials' {}
- newRDSDatabaseCredentials :: Text -> Text -> RDSDatabaseCredentials
- rDSDatabaseCredentials_username :: Lens' RDSDatabaseCredentials Text
- rDSDatabaseCredentials_password :: Lens' RDSDatabaseCredentials Text
- data RDSMetadata = RDSMetadata' {}
- newRDSMetadata :: RDSMetadata
- rDSMetadata_dataPipelineId :: Lens' RDSMetadata (Maybe Text)
- rDSMetadata_database :: Lens' RDSMetadata (Maybe RDSDatabase)
- rDSMetadata_databaseUserName :: Lens' RDSMetadata (Maybe Text)
- rDSMetadata_resourceRole :: Lens' RDSMetadata (Maybe Text)
- rDSMetadata_selectSqlQuery :: Lens' RDSMetadata (Maybe Text)
- rDSMetadata_serviceRole :: Lens' RDSMetadata (Maybe Text)
- data RealtimeEndpointInfo = RealtimeEndpointInfo' {}
- newRealtimeEndpointInfo :: RealtimeEndpointInfo
- realtimeEndpointInfo_createdAt :: Lens' RealtimeEndpointInfo (Maybe UTCTime)
- realtimeEndpointInfo_endpointStatus :: Lens' RealtimeEndpointInfo (Maybe RealtimeEndpointStatus)
- realtimeEndpointInfo_endpointUrl :: Lens' RealtimeEndpointInfo (Maybe Text)
- realtimeEndpointInfo_peakRequestsPerSecond :: Lens' RealtimeEndpointInfo (Maybe Int)
- data RedshiftDataSpec = RedshiftDataSpec' {}
- newRedshiftDataSpec :: RedshiftDatabase -> Text -> RedshiftDatabaseCredentials -> Text -> RedshiftDataSpec
- redshiftDataSpec_dataRearrangement :: Lens' RedshiftDataSpec (Maybe Text)
- redshiftDataSpec_dataSchema :: Lens' RedshiftDataSpec (Maybe Text)
- redshiftDataSpec_dataSchemaUri :: Lens' RedshiftDataSpec (Maybe Text)
- redshiftDataSpec_databaseInformation :: Lens' RedshiftDataSpec RedshiftDatabase
- redshiftDataSpec_selectSqlQuery :: Lens' RedshiftDataSpec Text
- redshiftDataSpec_databaseCredentials :: Lens' RedshiftDataSpec RedshiftDatabaseCredentials
- redshiftDataSpec_s3StagingLocation :: Lens' RedshiftDataSpec Text
- data RedshiftDatabase = RedshiftDatabase' {}
- newRedshiftDatabase :: Text -> Text -> RedshiftDatabase
- redshiftDatabase_databaseName :: Lens' RedshiftDatabase Text
- redshiftDatabase_clusterIdentifier :: Lens' RedshiftDatabase Text
- data RedshiftDatabaseCredentials = RedshiftDatabaseCredentials' {}
- newRedshiftDatabaseCredentials :: Text -> Text -> RedshiftDatabaseCredentials
- redshiftDatabaseCredentials_username :: Lens' RedshiftDatabaseCredentials Text
- redshiftDatabaseCredentials_password :: Lens' RedshiftDatabaseCredentials Text
- data RedshiftMetadata = RedshiftMetadata' {}
- newRedshiftMetadata :: RedshiftMetadata
- redshiftMetadata_databaseUserName :: Lens' RedshiftMetadata (Maybe Text)
- redshiftMetadata_redshiftDatabase :: Lens' RedshiftMetadata (Maybe RedshiftDatabase)
- redshiftMetadata_selectSqlQuery :: Lens' RedshiftMetadata (Maybe Text)
- data S3DataSpec = S3DataSpec' {}
- newS3DataSpec :: Text -> S3DataSpec
- s3DataSpec_dataRearrangement :: Lens' S3DataSpec (Maybe Text)
- s3DataSpec_dataSchema :: Lens' S3DataSpec (Maybe Text)
- s3DataSpec_dataSchemaLocationS3 :: Lens' S3DataSpec (Maybe Text)
- s3DataSpec_dataLocationS3 :: Lens' S3DataSpec Text
- data Tag = Tag' {}
- newTag :: Tag
- tag_key :: Lens' Tag (Maybe Text)
- tag_value :: Lens' Tag (Maybe Text)
Service Configuration
defaultService :: Service Source #
API version 2014-12-12 of the Amazon Machine Learning SDK configuration.
Errors
_IdempotentParameterMismatchException :: AsError a => Fold a ServiceError Source #
A second request to use or change an object was not allowed. This can result from retrying a request using a parameter that was not present in the original request.
_InternalServerException :: AsError a => Fold a ServiceError Source #
An error on the server occurred when trying to process a request.
_InvalidInputException :: AsError a => Fold a ServiceError Source #
An error on the client occurred. Typically, the cause is an invalid input value.
_InvalidTagException :: AsError a => Fold a ServiceError Source #
Prism for `InvalidTagException` errors.
_LimitExceededException :: AsError a => Fold a ServiceError Source #
The subscriber exceeded the maximum number of operations. This exception
can occur when listing objects such as DataSource.
_PredictorNotMountedException :: AsError a => Fold a ServiceError Source #
The exception is thrown when a predict request is made to an unmounted
MLModel.
_ResourceNotFoundException :: AsError a => Fold a ServiceError Source #
A specified resource cannot be located.
_TagLimitExceededException :: AsError a => Fold a ServiceError Source #
Prism for `TagLimitExceededException` errors.
Algorithm
The function used to train an MLModel. Training choices supported by
Amazon ML include the following:
- `SGD` - Stochastic Gradient Descent.
- `RandomForest` - Random forest of decision trees.
Constructors
| Algorithm' | |
Fields | |
Bundled Patterns
| pattern Algorithm_Sgd :: Algorithm |
Instances
BatchPredictionFilterVariable
newtype BatchPredictionFilterVariable Source #
A list of the variables to use in searching or filtering
BatchPrediction.
- `CreatedAt` - Sets the search criteria to the `BatchPrediction` creation date.
- `Status` - Sets the search criteria to the `BatchPrediction` status.
- `Name` - Sets the search criteria to the contents of the `BatchPrediction` `Name`.
- `IAMUser` - Sets the search criteria to the user account that invoked the `BatchPrediction` creation.
- `MLModelId` - Sets the search criteria to the `MLModel` used in the `BatchPrediction`.
- `DataSourceId` - Sets the search criteria to the `DataSource` used in the `BatchPrediction`.
- `DataURI` - Sets the search criteria to the data file(s) used in the `BatchPrediction`. The URL can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
Constructors
| BatchPredictionFilterVariable' | |
Fields | |
Bundled Patterns
Instances
DataSourceFilterVariable
newtype DataSourceFilterVariable Source #
A list of the variables to use in searching or filtering DataSource.
- `CreatedAt` - Sets the search criteria to the `DataSource` creation date.
- `Status` - Sets the search criteria to the `DataSource` status.
- `Name` - Sets the search criteria to the contents of the `DataSource` `Name`.
- `DataUri` - Sets the search criteria to the URI of data files used to create the `DataSource`. The URI can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
- `IAMUser` - Sets the search criteria to the user account that invoked the `DataSource` creation.
Note: The variable names should match the variable names in the
DataSource.
Constructors
| DataSourceFilterVariable' | |
Fields | |
Bundled Patterns
Instances
DetailsAttributes
newtype DetailsAttributes Source #
Contains the key values of DetailsMap:
- `PredictiveModelType` - Indicates the type of the `MLModel`.
- `Algorithm` - Indicates the algorithm that was used for the `MLModel`.
Constructors
| DetailsAttributes' | |
Fields | |
Bundled Patterns
| pattern DetailsAttributes_Algorithm :: DetailsAttributes | |
| pattern DetailsAttributes_PredictiveModelType :: DetailsAttributes |
Instances
EntityStatus
newtype EntityStatus Source #
Object status with the following possible values:
PENDING
INPROGRESS
FAILED
COMPLETED
DELETED
Constructors
| EntityStatus' | |
Fields | |
Bundled Patterns
| pattern EntityStatus_COMPLETED :: EntityStatus | |
| pattern EntityStatus_DELETED :: EntityStatus | |
| pattern EntityStatus_FAILED :: EntityStatus | |
| pattern EntityStatus_INPROGRESS :: EntityStatus | |
| pattern EntityStatus_PENDING :: EntityStatus |
Instances
EvaluationFilterVariable
newtype EvaluationFilterVariable Source #
A list of the variables to use in searching or filtering Evaluation.
- `CreatedAt` - Sets the search criteria to the `Evaluation` creation date.
- `Status` - Sets the search criteria to the `Evaluation` status.
- `Name` - Sets the search criteria to the contents of the `Evaluation` `Name`.
- `IAMUser` - Sets the search criteria to the user account that invoked an evaluation.
- `MLModelId` - Sets the search criteria to the `Predictor` that was evaluated.
- `DataSourceId` - Sets the search criteria to the `DataSource` used in evaluation.
- `DataUri` - Sets the search criteria to the data file(s) used in evaluation. The URL can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
Constructors
| EvaluationFilterVariable' | |
Fields | |
Bundled Patterns
Instances
MLModelFilterVariable
newtype MLModelFilterVariable Source #
Constructors
| MLModelFilterVariable' | |
Fields | |
Bundled Patterns
Instances
MLModelType
newtype MLModelType Source #
Constructors
| MLModelType' | |
Fields | |
Bundled Patterns
| pattern MLModelType_BINARY :: MLModelType | |
| pattern MLModelType_MULTICLASS :: MLModelType | |
| pattern MLModelType_REGRESSION :: MLModelType |
Instances
RealtimeEndpointStatus
newtype RealtimeEndpointStatus Source #
Constructors
| RealtimeEndpointStatus' | |
Fields | |
Bundled Patterns
| pattern RealtimeEndpointStatus_FAILED :: RealtimeEndpointStatus | |
| pattern RealtimeEndpointStatus_NONE :: RealtimeEndpointStatus | |
| pattern RealtimeEndpointStatus_READY :: RealtimeEndpointStatus | |
| pattern RealtimeEndpointStatus_UPDATING :: RealtimeEndpointStatus |
Instances
SortOrder
The sort order specified in a listing condition. Possible values include the following:
- `asc` - Present the information in ascending order (from A-Z).
- `dsc` - Present the information in descending order (from Z-A).
Constructors
| SortOrder' | |
Fields | |
Bundled Patterns
| pattern SortOrder_Asc :: SortOrder | |
| pattern SortOrder_Dsc :: SortOrder |
Instances
TaggableResourceType
newtype TaggableResourceType Source #
Constructors
| TaggableResourceType' | |
Fields | |
Bundled Patterns
Instances
BatchPrediction
data BatchPrediction Source #
Represents the output of a GetBatchPrediction operation.
The content consists of the detailed metadata, the status, and the data
file information of a Batch Prediction.
See: newBatchPrediction smart constructor.
Constructors
| BatchPrediction' | |
Fields
| |
Instances
newBatchPrediction :: BatchPrediction Source #
Create a value of BatchPrediction with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:batchPredictionDataSourceId:BatchPrediction', batchPrediction_batchPredictionDataSourceId - The ID of the DataSource that points to the group of observations to
predict.
$sel:batchPredictionId:BatchPrediction', batchPrediction_batchPredictionId - The ID assigned to the BatchPrediction at creation. This value should
be identical to the value of the BatchPredictionID in the request.
$sel:computeTime:BatchPrediction', batchPrediction_computeTime - Undocumented member.
$sel:createdAt:BatchPrediction', batchPrediction_createdAt - The time that the BatchPrediction was created. The time is expressed
in epoch time.
$sel:createdByIamUser:BatchPrediction', batchPrediction_createdByIamUser - The AWS user account that invoked the BatchPrediction. The account
type can be either an AWS root account or an AWS Identity and Access
Management (IAM) user account.
$sel:finishedAt:BatchPrediction', batchPrediction_finishedAt - Undocumented member.
$sel:inputDataLocationS3:BatchPrediction', batchPrediction_inputDataLocationS3 - The location of the data file or directory in Amazon Simple Storage
Service (Amazon S3).
$sel:invalidRecordCount:BatchPrediction', batchPrediction_invalidRecordCount - Undocumented member.
$sel:lastUpdatedAt:BatchPrediction', batchPrediction_lastUpdatedAt - The time of the most recent edit to the BatchPrediction. The time is
expressed in epoch time.
$sel:mLModelId:BatchPrediction', batchPrediction_mLModelId - The ID of the MLModel that generated predictions for the
BatchPrediction request.
$sel:message:BatchPrediction', batchPrediction_message - A description of the most recent details about processing the batch
prediction request.
$sel:name:BatchPrediction', batchPrediction_name - A user-supplied name or description of the BatchPrediction.
$sel:outputUri:BatchPrediction', batchPrediction_outputUri - The location of an Amazon S3 bucket or directory to receive the
operation results. The following substrings are not allowed in the
s3 key portion of the outputURI field: ':', '//', '/./',
'/../'.
$sel:startedAt:BatchPrediction', batchPrediction_startedAt - Undocumented member.
$sel:status:BatchPrediction', batchPrediction_status - The status of the BatchPrediction. This element can have one of the
following values:
- `PENDING` - Amazon Machine Learning (Amazon ML) submitted a request to generate predictions for a batch of observations.
- `INPROGRESS` - The process is underway.
- `FAILED` - The request to perform a batch prediction did not run to completion. It is not usable.
- `COMPLETED` - The batch prediction process completed successfully.
- `DELETED` - The `BatchPrediction` is marked as deleted. It is not usable.
$sel:totalRecordCount:BatchPrediction', batchPrediction_totalRecordCount - Undocumented member.
batchPrediction_batchPredictionDataSourceId :: Lens' BatchPrediction (Maybe Text) Source #
The ID of the DataSource that points to the group of observations to
predict.
batchPrediction_batchPredictionId :: Lens' BatchPrediction (Maybe Text) Source #
The ID assigned to the BatchPrediction at creation. This value should
be identical to the value of the BatchPredictionID in the request.
batchPrediction_computeTime :: Lens' BatchPrediction (Maybe Integer) Source #
Undocumented member.
batchPrediction_createdAt :: Lens' BatchPrediction (Maybe UTCTime) Source #
The time that the BatchPrediction was created. The time is expressed
in epoch time.
batchPrediction_createdByIamUser :: Lens' BatchPrediction (Maybe Text) Source #
The AWS user account that invoked the BatchPrediction. The account
type can be either an AWS root account or an AWS Identity and Access
Management (IAM) user account.
batchPrediction_finishedAt :: Lens' BatchPrediction (Maybe UTCTime) Source #
Undocumented member.
batchPrediction_inputDataLocationS3 :: Lens' BatchPrediction (Maybe Text) Source #
The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).
batchPrediction_invalidRecordCount :: Lens' BatchPrediction (Maybe Integer) Source #
Undocumented member.
batchPrediction_lastUpdatedAt :: Lens' BatchPrediction (Maybe UTCTime) Source #
The time of the most recent edit to the BatchPrediction. The time is
expressed in epoch time.
batchPrediction_mLModelId :: Lens' BatchPrediction (Maybe Text) Source #
The ID of the MLModel that generated predictions for the
BatchPrediction request.
batchPrediction_message :: Lens' BatchPrediction (Maybe Text) Source #
A description of the most recent details about processing the batch prediction request.
batchPrediction_name :: Lens' BatchPrediction (Maybe Text) Source #
A user-supplied name or description of the BatchPrediction.
batchPrediction_outputUri :: Lens' BatchPrediction (Maybe Text) Source #
The location of an Amazon S3 bucket or directory to receive the
operation results. The following substrings are not allowed in the
s3 key portion of the outputURI field: ':', '//', '/./',
'/../'.
batchPrediction_startedAt :: Lens' BatchPrediction (Maybe UTCTime) Source #
Undocumented member.
batchPrediction_status :: Lens' BatchPrediction (Maybe EntityStatus) Source #
The status of the BatchPrediction. This element can have one of the
following values:
- PENDING - Amazon Machine Learning (Amazon ML) submitted a request to generate predictions for a batch of observations.
- INPROGRESS - The process is underway.
- FAILED - The request to perform a batch prediction did not run to completion. It is not usable.
- COMPLETED - The batch prediction process completed successfully.
- DELETED - The BatchPrediction is marked as deleted. It is not usable.
batchPrediction_totalRecordCount :: Lens' BatchPrediction (Maybe Integer) Source #
Undocumented member.
DataSource
data DataSource Source #
Represents the output of the GetDataSource operation.
The content consists of the detailed metadata and data file information
and the current status of the DataSource.
See: newDataSource smart constructor.
Constructors
| DataSource' | |
Fields
| |
Instances
newDataSource :: DataSource Source #
Create a value of DataSource with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:computeStatistics:DataSource', dataSource_computeStatistics - The parameter is true if statistics need to be generated from the
observation data.
$sel:computeTime:DataSource', dataSource_computeTime - Undocumented member.
$sel:createdAt:DataSource', dataSource_createdAt - The time that the DataSource was created. The time is expressed in
epoch time.
$sel:createdByIamUser:DataSource', dataSource_createdByIamUser - The AWS user account from which the DataSource was created. The
account type can be either an AWS root account or an AWS Identity and
Access Management (IAM) user account.
$sel:dataLocationS3:DataSource', dataSource_dataLocationS3 - The location and name of the data in Amazon Simple Storage Service
(Amazon S3) that is used by a DataSource.
$sel:dataRearrangement:DataSource', dataSource_dataRearrangement - A JSON string that represents the splitting and rearrangement
requirement used when this DataSource was created.
$sel:dataSizeInBytes:DataSource', dataSource_dataSizeInBytes - The total number of observations contained in the data files that the
DataSource references.
$sel:dataSourceId:DataSource', dataSource_dataSourceId - The ID that is assigned to the DataSource during creation.
$sel:finishedAt:DataSource', dataSource_finishedAt - Undocumented member.
$sel:lastUpdatedAt:DataSource', dataSource_lastUpdatedAt - The time of the most recent edit to the DataSource. The time is
expressed in epoch time.
$sel:message:DataSource', dataSource_message - A description of the most recent details about creating the
DataSource.
$sel:name:DataSource', dataSource_name - A user-supplied name or description of the DataSource.
$sel:numberOfFiles:DataSource', dataSource_numberOfFiles - The number of data files referenced by the DataSource.
$sel:rDSMetadata:DataSource', dataSource_rDSMetadata - Undocumented member.
$sel:redshiftMetadata:DataSource', dataSource_redshiftMetadata - Undocumented member.
$sel:roleARN:DataSource', dataSource_roleARN - Undocumented member.
$sel:startedAt:DataSource', dataSource_startedAt - Undocumented member.
$sel:status:DataSource', dataSource_status - The current status of the DataSource. This element can have one of the
following values:
- PENDING - Amazon Machine Learning (Amazon ML) submitted a request to
create a
DataSource. - INPROGRESS - The creation process is underway.
- FAILED - The request to create a
DataSourcedid not run to completion. It is not usable. - COMPLETED - The creation process completed successfully.
- DELETED - The
DataSourceis marked as deleted. It is not usable.
dataSource_computeStatistics :: Lens' DataSource (Maybe Bool) Source #
The parameter is true if statistics need to be generated from the
observation data.
dataSource_computeTime :: Lens' DataSource (Maybe Integer) Source #
Undocumented member.
dataSource_createdAt :: Lens' DataSource (Maybe UTCTime) Source #
The time that the DataSource was created. The time is expressed in
epoch time.
dataSource_createdByIamUser :: Lens' DataSource (Maybe Text) Source #
The AWS user account from which the DataSource was created. The
account type can be either an AWS root account or an AWS Identity and
Access Management (IAM) user account.
dataSource_dataLocationS3 :: Lens' DataSource (Maybe Text) Source #
The location and name of the data in Amazon Simple Storage Service
(Amazon S3) that is used by a DataSource.
dataSource_dataRearrangement :: Lens' DataSource (Maybe Text) Source #
A JSON string that represents the splitting and rearrangement
requirement used when this DataSource was created.
dataSource_dataSizeInBytes :: Lens' DataSource (Maybe Integer) Source #
The total number of observations contained in the data files that the
DataSource references.
dataSource_dataSourceId :: Lens' DataSource (Maybe Text) Source #
The ID that is assigned to the DataSource during creation.
dataSource_finishedAt :: Lens' DataSource (Maybe UTCTime) Source #
Undocumented member.
dataSource_lastUpdatedAt :: Lens' DataSource (Maybe UTCTime) Source #
The time of the most recent edit to the DataSource. The time is
expressed in epoch time.
dataSource_message :: Lens' DataSource (Maybe Text) Source #
A description of the most recent details about creating the
DataSource.
dataSource_name :: Lens' DataSource (Maybe Text) Source #
A user-supplied name or description of the DataSource.
dataSource_numberOfFiles :: Lens' DataSource (Maybe Integer) Source #
The number of data files referenced by the DataSource.
dataSource_rDSMetadata :: Lens' DataSource (Maybe RDSMetadata) Source #
Undocumented member.
dataSource_redshiftMetadata :: Lens' DataSource (Maybe RedshiftMetadata) Source #
Undocumented member.
dataSource_roleARN :: Lens' DataSource (Maybe Text) Source #
Undocumented member.
dataSource_startedAt :: Lens' DataSource (Maybe UTCTime) Source #
Undocumented member.
dataSource_status :: Lens' DataSource (Maybe EntityStatus) Source #
The current status of the DataSource. This element can have one of the
following values:
- PENDING - Amazon Machine Learning (Amazon ML) submitted a request to
create a
DataSource. - INPROGRESS - The creation process is underway.
- FAILED - The request to create a
DataSourcedid not run to completion. It is not usable. - COMPLETED - The creation process completed successfully.
- DELETED - The
DataSourceis marked as deleted. It is not usable.
Evaluation
data Evaluation Source #
Represents the output of GetEvaluation operation.
The content consists of the detailed metadata and data file information
and the current status of the Evaluation.
See: newEvaluation smart constructor.
Constructors
| Evaluation' | |
Fields
| |
Instances
newEvaluation :: Evaluation Source #
Create a value of Evaluation with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:computeTime:Evaluation', evaluation_computeTime - Undocumented member.
$sel:createdAt:Evaluation', evaluation_createdAt - The time that the Evaluation was created. The time is expressed in
epoch time.
$sel:createdByIamUser:Evaluation', evaluation_createdByIamUser - The AWS user account that invoked the evaluation. The account type can
be either an AWS root account or an AWS Identity and Access Management
(IAM) user account.
$sel:evaluationDataSourceId:Evaluation', evaluation_evaluationDataSourceId - The ID of the DataSource that is used to evaluate the MLModel.
$sel:evaluationId:Evaluation', evaluation_evaluationId - The ID that is assigned to the Evaluation at creation.
$sel:finishedAt:Evaluation', evaluation_finishedAt - Undocumented member.
$sel:inputDataLocationS3:Evaluation', evaluation_inputDataLocationS3 - The location and name of the data in Amazon Simple Storage Service
(Amazon S3) that is used in the evaluation.
$sel:lastUpdatedAt:Evaluation', evaluation_lastUpdatedAt - The time of the most recent edit to the Evaluation. The time is
expressed in epoch time.
$sel:mLModelId:Evaluation', evaluation_mLModelId - The ID of the MLModel that is the focus of the evaluation.
$sel:message:Evaluation', evaluation_message - A description of the most recent details about evaluating the MLModel.
$sel:name:Evaluation', evaluation_name - A user-supplied name or description of the Evaluation.
$sel:performanceMetrics:Evaluation', evaluation_performanceMetrics - Measurements of how well the MLModel performed, using observations
referenced by the DataSource. One of the following metrics is
returned, based on the type of the MLModel:
- BinaryAUC: A binary
MLModeluses the Area Under the Curve (AUC) technique to measure performance. - RegressionRMSE: A regression
MLModeluses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable. - MulticlassAvgFScore: A multiclass
MLModeluses the F1 score technique to measure performance.
For more information about performance metrics, please see the Amazon Machine Learning Developer Guide.
$sel:startedAt:Evaluation', evaluation_startedAt - Undocumented member.
$sel:status:Evaluation', evaluation_status - The status of the evaluation. This element can have one of the following
values:
- PENDING - Amazon Machine Learning (Amazon ML) submitted a request to evaluate an MLModel.
- INPROGRESS - The evaluation is underway.
- FAILED - The request to evaluate an MLModel did not run to completion. It is not usable.
- COMPLETED - The evaluation process completed successfully.
- DELETED - The Evaluation is marked as deleted. It is not usable.
evaluation_computeTime :: Lens' Evaluation (Maybe Integer) Source #
Undocumented member.
evaluation_createdAt :: Lens' Evaluation (Maybe UTCTime) Source #
The time that the Evaluation was created. The time is expressed in
epoch time.
evaluation_createdByIamUser :: Lens' Evaluation (Maybe Text) Source #
The AWS user account that invoked the evaluation. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.
evaluation_evaluationDataSourceId :: Lens' Evaluation (Maybe Text) Source #
The ID of the DataSource that is used to evaluate the MLModel.
evaluation_evaluationId :: Lens' Evaluation (Maybe Text) Source #
The ID that is assigned to the Evaluation at creation.
evaluation_finishedAt :: Lens' Evaluation (Maybe UTCTime) Source #
Undocumented member.
evaluation_inputDataLocationS3 :: Lens' Evaluation (Maybe Text) Source #
The location and name of the data in Amazon Simple Storage Service (Amazon S3) that is used in the evaluation.
evaluation_lastUpdatedAt :: Lens' Evaluation (Maybe UTCTime) Source #
The time of the most recent edit to the Evaluation. The time is
expressed in epoch time.
evaluation_mLModelId :: Lens' Evaluation (Maybe Text) Source #
The ID of the MLModel that is the focus of the evaluation.
evaluation_message :: Lens' Evaluation (Maybe Text) Source #
A description of the most recent details about evaluating the MLModel.
evaluation_name :: Lens' Evaluation (Maybe Text) Source #
A user-supplied name or description of the Evaluation.
evaluation_performanceMetrics :: Lens' Evaluation (Maybe PerformanceMetrics) Source #
Measurements of how well the MLModel performed, using observations
referenced by the DataSource. One of the following metrics is
returned, based on the type of the MLModel:
- BinaryAUC: A binary
MLModeluses the Area Under the Curve (AUC) technique to measure performance. - RegressionRMSE: A regression
MLModeluses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable. - MulticlassAvgFScore: A multiclass
MLModeluses the F1 score technique to measure performance.
For more information about performance metrics, please see the Amazon Machine Learning Developer Guide.
evaluation_startedAt :: Lens' Evaluation (Maybe UTCTime) Source #
Undocumented member.
evaluation_status :: Lens' Evaluation (Maybe EntityStatus) Source #
The status of the evaluation. This element can have one of the following values:
- PENDING - Amazon Machine Learning (Amazon ML) submitted a request to evaluate an MLModel.
- INPROGRESS - The evaluation is underway.
- FAILED - The request to evaluate an MLModel did not run to completion. It is not usable.
- COMPLETED - The evaluation process completed successfully.
- DELETED - The Evaluation is marked as deleted. It is not usable.
MLModel
Represents the output of a GetMLModel operation.
The content consists of the detailed metadata and the current status of
the MLModel.
See: newMLModel smart constructor.
Constructors
| MLModel' | |
Fields
| |
Instances
newMLModel :: MLModel Source #
Create a value of MLModel with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:algorithm:MLModel', mLModel_algorithm - The algorithm used to train the MLModel. The following algorithm is
supported:
- SGD -- Stochastic gradient descent. The goal of SGD is to minimize the gradient of the loss function.
$sel:computeTime:MLModel', mLModel_computeTime - Undocumented member.
MLModel, mLModel_createdAt - The time that the MLModel was created. The time is expressed in epoch
time.
$sel:createdByIamUser:MLModel', mLModel_createdByIamUser - The AWS user account from which the MLModel was created. The account
type can be either an AWS root account or an AWS Identity and Access
Management (IAM) user account.
$sel:endpointInfo:MLModel', mLModel_endpointInfo - The current endpoint of the MLModel.
$sel:finishedAt:MLModel', mLModel_finishedAt - Undocumented member.
$sel:inputDataLocationS3:MLModel', mLModel_inputDataLocationS3 - The location of the data file or directory in Amazon Simple Storage
Service (Amazon S3).
$sel:lastUpdatedAt:MLModel', mLModel_lastUpdatedAt - The time of the most recent edit to the MLModel. The time is expressed
in epoch time.
$sel:mLModelId:MLModel', mLModel_mLModelId - The ID assigned to the MLModel at creation.
$sel:mLModelType:MLModel', mLModel_mLModelType - Identifies the MLModel category. The following are the available
types:
- REGRESSION - Produces a numeric result. For example, "What price should a house be listed at?"
- BINARY - Produces one of two possible results. For example, "Is this a child-friendly web site?".
- MULTICLASS - Produces one of several possible results. For example, "Is this a HIGH-, LOW-, or MEDIUM-risk trade?".
$sel:message:MLModel', mLModel_message - A description of the most recent details about accessing the MLModel.
$sel:name:MLModel', mLModel_name - A user-supplied name or description of the MLModel.
$sel:scoreThreshold:MLModel', mLModel_scoreThreshold - Undocumented member.
$sel:scoreThresholdLastUpdatedAt:MLModel', mLModel_scoreThresholdLastUpdatedAt - The time of the most recent edit to the ScoreThreshold. The time is
expressed in epoch time.
$sel:sizeInBytes:MLModel', mLModel_sizeInBytes - Undocumented member.
$sel:startedAt:MLModel', mLModel_startedAt - Undocumented member.
$sel:status:MLModel', mLModel_status - The current status of an MLModel. This element can have one of the
following values:
- PENDING - Amazon Machine Learning (Amazon ML) submitted a request to create an MLModel.
- INPROGRESS - The creation process is underway.
- FAILED - The request to create an MLModel didn't run to completion. The model isn't usable.
- COMPLETED - The creation process completed successfully.
- DELETED - The MLModel is marked as deleted. It isn't usable.
$sel:trainingDataSourceId:MLModel', mLModel_trainingDataSourceId - The ID of the training DataSource. The CreateMLModel operation uses
the TrainingDataSourceId.
$sel:trainingParameters:MLModel', mLModel_trainingParameters - A list of the training parameters in the MLModel. The list is
implemented as a map of key-value pairs.
The following is the current set of training parameters:
sgd.maxMLModelSizeInBytes- The maximum allowed size of the model. Depending on the input data, the size of the model might affect its performance.The value is an integer that ranges from
100000to2147483648. The default value is33554432.sgd.maxPasses- The number of times that the training process traverses the observations to build theMLModel. The value is an integer that ranges from1to10000. The default value is10.sgd.shuffleType- Whether Amazon ML shuffles the training data. Shuffling the data improves a model's ability to find the optimal solution for a variety of data types. The valid values areautoandnone. The default value isnone.sgd.l1RegularizationAmount- The coefficient regularization L1 norm, which controls overfitting the data by penalizing large coefficients. This parameter tends to drive coefficients to zero, resulting in sparse feature set. If you use this parameter, start by specifying a small value, such as1.0E-08.The value is a double that ranges from
0toMAX_DOUBLE. The default is to not use L1 normalization. This parameter can't be used whenL2is specified. Use this parameter sparingly.sgd.l2RegularizationAmount- The coefficient regularization L2 norm, which controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to small, nonzero values. If you use this parameter, start by specifying a small value, such as1.0E-08.The value is a double that ranges from
0toMAX_DOUBLE. The default is to not use L2 normalization. This parameter can't be used whenL1is specified. Use this parameter sparingly.
mLModel_algorithm :: Lens' MLModel (Maybe Algorithm) Source #
The algorithm used to train the MLModel. The following algorithm is
supported:
- SGD -- Stochastic gradient descent. The goal of SGD is to minimize the gradient of the loss function.
mLModel_createdAt :: Lens' MLModel (Maybe UTCTime) Source #
The time that the MLModel was created. The time is expressed in epoch
time.
mLModel_createdByIamUser :: Lens' MLModel (Maybe Text) Source #
The AWS user account from which the MLModel was created. The account
type can be either an AWS root account or an AWS Identity and Access
Management (IAM) user account.
mLModel_endpointInfo :: Lens' MLModel (Maybe RealtimeEndpointInfo) Source #
The current endpoint of the MLModel.
mLModel_inputDataLocationS3 :: Lens' MLModel (Maybe Text) Source #
The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).
mLModel_lastUpdatedAt :: Lens' MLModel (Maybe UTCTime) Source #
The time of the most recent edit to the MLModel. The time is expressed
in epoch time.
mLModel_mLModelType :: Lens' MLModel (Maybe MLModelType) Source #
Identifies the MLModel category. The following are the available
types:
- REGRESSION - Produces a numeric result. For example, "What price should a house be listed at?"
- BINARY - Produces one of two possible results. For example, "Is this a child-friendly web site?".
- MULTICLASS - Produces one of several possible results. For example, "Is this a HIGH-, LOW-, or MEDIUM-risk trade?".
mLModel_message :: Lens' MLModel (Maybe Text) Source #
A description of the most recent details about accessing the MLModel.
mLModel_name :: Lens' MLModel (Maybe Text) Source #
A user-supplied name or description of the MLModel.
mLModel_scoreThresholdLastUpdatedAt :: Lens' MLModel (Maybe UTCTime) Source #
The time of the most recent edit to the ScoreThreshold. The time is
expressed in epoch time.
mLModel_status :: Lens' MLModel (Maybe EntityStatus) Source #
The current status of an MLModel. This element can have one of the
following values:
- PENDING - Amazon Machine Learning (Amazon ML) submitted a request to create an MLModel.
- INPROGRESS - The creation process is underway.
- FAILED - The request to create an MLModel didn't run to completion. The model isn't usable.
- COMPLETED - The creation process completed successfully.
- DELETED - The MLModel is marked as deleted. It isn't usable.
mLModel_trainingDataSourceId :: Lens' MLModel (Maybe Text) Source #
The ID of the training DataSource. The CreateMLModel operation uses
the TrainingDataSourceId.
mLModel_trainingParameters :: Lens' MLModel (Maybe (HashMap Text Text)) Source #
A list of the training parameters in the MLModel. The list is
implemented as a map of key-value pairs.
The following is the current set of training parameters:
sgd.maxMLModelSizeInBytes- The maximum allowed size of the model. Depending on the input data, the size of the model might affect its performance.The value is an integer that ranges from
100000to2147483648. The default value is33554432.sgd.maxPasses- The number of times that the training process traverses the observations to build theMLModel. The value is an integer that ranges from1to10000. The default value is10.sgd.shuffleType- Whether Amazon ML shuffles the training data. Shuffling the data improves a model's ability to find the optimal solution for a variety of data types. The valid values areautoandnone. The default value isnone.sgd.l1RegularizationAmount- The coefficient regularization L1 norm, which controls overfitting the data by penalizing large coefficients. This parameter tends to drive coefficients to zero, resulting in sparse feature set. If you use this parameter, start by specifying a small value, such as1.0E-08.The value is a double that ranges from
0toMAX_DOUBLE. The default is to not use L1 normalization. This parameter can't be used whenL2is specified. Use this parameter sparingly.sgd.l2RegularizationAmount- The coefficient regularization L2 norm, which controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to small, nonzero values. If you use this parameter, start by specifying a small value, such as1.0E-08.The value is a double that ranges from
0toMAX_DOUBLE. The default is to not use L2 normalization. This parameter can't be used whenL1is specified. Use this parameter sparingly.
PerformanceMetrics
data PerformanceMetrics Source #
Measurements of how well the MLModel performed on known observations.
One of the following metrics is returned, based on the type of the
MLModel:
- BinaryAUC: The binary
MLModeluses the Area Under the Curve (AUC) technique to measure performance. - RegressionRMSE: The regression
MLModeluses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable. - MulticlassAvgFScore: The multiclass
MLModeluses the F1 score technique to measure performance.
For more information about performance metrics, please see the Amazon Machine Learning Developer Guide.
See: newPerformanceMetrics smart constructor.
Constructors
| PerformanceMetrics' | |
Fields
| |
Instances
newPerformanceMetrics :: PerformanceMetrics Source #
Create a value of PerformanceMetrics with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:properties:PerformanceMetrics', performanceMetrics_properties - Undocumented member.
performanceMetrics_properties :: Lens' PerformanceMetrics (Maybe (HashMap Text Text)) Source #
Undocumented member.
Prediction
data Prediction Source #
The output from a Predict operation:
- Details - Contains the following attributes: DetailsAttributes.PREDICTIVE_MODEL_TYPE - REGRESSION | BINARY | MULTICLASS; DetailsAttributes.ALGORITHM - SGD
- PredictedLabel - Present for either a BINARY or MULTICLASS MLModel request.
- PredictedScores - Contains the raw classification score corresponding to each label.
- PredictedValue - Present for a REGRESSION MLModel request.
See: newPrediction smart constructor.
Constructors
| Prediction' | |
Fields
| |
Instances
newPrediction :: Prediction Source #
Create a value of Prediction with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:details:Prediction', prediction_details - Undocumented member.
$sel:predictedLabel:Prediction', prediction_predictedLabel - The prediction label for either a BINARY or MULTICLASS MLModel.
$sel:predictedScores:Prediction', prediction_predictedScores - Undocumented member.
$sel:predictedValue:Prediction', prediction_predictedValue - The prediction value for REGRESSION MLModel.
prediction_details :: Lens' Prediction (Maybe (HashMap DetailsAttributes Text)) Source #
Undocumented member.
prediction_predictedLabel :: Lens' Prediction (Maybe Text) Source #
The prediction label for either a BINARY or MULTICLASS MLModel.
prediction_predictedScores :: Lens' Prediction (Maybe (HashMap Text Double)) Source #
Undocumented member.
prediction_predictedValue :: Lens' Prediction (Maybe Double) Source #
The prediction value for REGRESSION MLModel.
RDSDataSpec
data RDSDataSpec Source #
The data specification of an Amazon Relational Database Service (Amazon
RDS) DataSource.
See: newRDSDataSpec smart constructor.
Constructors
| RDSDataSpec' | |
Fields
| |
Instances
Arguments
| :: RDSDatabase | |
| -> Text | |
| -> RDSDatabaseCredentials | |
| -> Text | |
| -> Text | |
| -> Text | |
| -> Text | |
| -> RDSDataSpec |
Create a value of RDSDataSpec with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:dataRearrangement:RDSDataSpec', rDSDataSpec_dataRearrangement - A JSON string that represents the splitting and rearrangement processing
to be applied to a DataSource. If the DataRearrangement parameter is
not provided, all of the input data is used to create the Datasource.
There are multiple parameters that control what data is used to create a datasource:
percentBeginUse
percentBeginto indicate the beginning of the range of the data used to create the Datasource. If you do not includepercentBeginandpercentEnd, Amazon ML includes all of the data when creating the datasource.percentEndUse
percentEndto indicate the end of the range of the data used to create the Datasource. If you do not includepercentBeginandpercentEnd, Amazon ML includes all of the data when creating the datasource.complementThe
complementparameter instructs Amazon ML to use the data that is not included in the range ofpercentBegintopercentEndto create a datasource. Thecomplementparameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values forpercentBeginandpercentEnd, along with thecomplementparameter.For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.
Datasource for evaluation:
{"splitting":{"percentBegin":0, "percentEnd":25}}Datasource for training:
{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}strategyTo change how Amazon ML splits the data for a datasource, use the
strategyparameter.The default value for the
strategyparameter issequential, meaning that Amazon ML takes all of the data records between thepercentBeginandpercentEndparameters for the datasource, in the order that the records appear in the input data.The following two
DataRearrangementlines are examples of sequentially ordered training and evaluation datasources:Datasource for evaluation:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}Datasource for training:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the
strategyparameter torandomand provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number betweenpercentBeginandpercentEnd. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in the cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.The following two
DataRearrangementlines are examples of non-sequentially ordered training and evaluation datasources:Datasource for evaluation:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv"}}Datasource for training:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed"="s3://my_s3_path/bucket/file.csv", "complement":"true"}}
$sel:dataSchema:RDSDataSpec', rDSDataSpec_dataSchema - A JSON string that represents the schema for an Amazon RDS DataSource.
The DataSchema defines the structure of the observation data in the
data file(s) referenced in the DataSource.
A DataSchema is not required if you specify a DataSchemaUri.
Define your DataSchema as a series of key-value pairs. attributes
and excludedVariableNames have an array of key-value pairs for their
value. Use the following format to define your DataSchema.
{ "version": "1.0",
"recordAnnotationFieldName": "F1",
"recordWeightFieldName": "F2",
"targetFieldName": "F3",
"dataFormat": "CSV",
"dataFileContainsHeader": true,
"attributes": [
{ "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],
"excludedVariableNames": [ "F6" ] }
$sel:dataSchemaUri:RDSDataSpec', rDSDataSpec_dataSchemaUri - The Amazon S3 location of the DataSchema.
$sel:databaseInformation:RDSDataSpec', rDSDataSpec_databaseInformation - Describes the DatabaseName and InstanceIdentifier of an Amazon RDS
database.
$sel:selectSqlQuery:RDSDataSpec', rDSDataSpec_selectSqlQuery - The query that is used to retrieve the observation data for the
DataSource.
$sel:databaseCredentials:RDSDataSpec', rDSDataSpec_databaseCredentials - The AWS Identity and Access Management (IAM) credentials that are used
to connect to the Amazon RDS database.
$sel:s3StagingLocation:RDSDataSpec', rDSDataSpec_s3StagingLocation - The Amazon S3 location for staging Amazon RDS data. The data retrieved
from Amazon RDS using SelectSqlQuery is stored in this location.
$sel:resourceRole:RDSDataSpec', rDSDataSpec_resourceRole - The role (DataPipelineDefaultResourceRole) assumed by an Amazon Elastic
Compute Cloud (Amazon EC2) instance to carry out the copy operation from
Amazon RDS to an Amazon S3 task. For more information, see
Role templates
for data pipelines.
$sel:serviceRole:RDSDataSpec', rDSDataSpec_serviceRole - The role (DataPipelineDefaultRole) assumed by AWS Data Pipeline service
to monitor the progress of the copy task from Amazon RDS to Amazon S3.
For more information, see
Role templates
for data pipelines.
$sel:subnetId:RDSDataSpec', rDSDataSpec_subnetId - The subnet ID to be used to access a VPC-based RDS DB instance. This
attribute is used by Data Pipeline to carry out the copy task from
Amazon RDS to Amazon S3.
$sel:securityGroupIds:RDSDataSpec', rDSDataSpec_securityGroupIds - The security group IDs to be used to access a VPC-based RDS DB instance.
Ensure that there are appropriate ingress rules set up to allow access
to the RDS DB instance. This attribute is used by Data Pipeline to carry
out the copy operation from Amazon RDS to an Amazon S3 task.
rDSDataSpec_dataRearrangement :: Lens' RDSDataSpec (Maybe Text) Source #
A JSON string that represents the splitting and rearrangement processing
to be applied to a DataSource. If the DataRearrangement parameter is
not provided, all of the input data is used to create the Datasource.
There are multiple parameters that control what data is used to create a datasource:
percentBeginUse
percentBeginto indicate the beginning of the range of the data used to create the Datasource. If you do not includepercentBeginandpercentEnd, Amazon ML includes all of the data when creating the datasource.percentEndUse
percentEndto indicate the end of the range of the data used to create the Datasource. If you do not includepercentBeginandpercentEnd, Amazon ML includes all of the data when creating the datasource.complementThe
complementparameter instructs Amazon ML to use the data that is not included in the range ofpercentBegintopercentEndto create a datasource. Thecomplementparameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values forpercentBeginandpercentEnd, along with thecomplementparameter.For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.
Datasource for evaluation:
{"splitting":{"percentBegin":0, "percentEnd":25}}Datasource for training:
{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}strategyTo change how Amazon ML splits the data for a datasource, use the
strategyparameter.The default value for the
strategyparameter issequential, meaning that Amazon ML takes all of the data records between thepercentBeginandpercentEndparameters for the datasource, in the order that the records appear in the input data.The following two
DataRearrangementlines are examples of sequentially ordered training and evaluation datasources:Datasource for evaluation:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}Datasource for training:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the
strategyparameter torandomand provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number betweenpercentBeginandpercentEnd. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in the cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.The following two
DataRearrangementlines are examples of non-sequentially ordered training and evaluation datasources:Datasource for evaluation:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}}Datasource for training:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}
rDSDataSpec_dataSchema :: Lens' RDSDataSpec (Maybe Text) Source #
A JSON string that represents the schema for an Amazon RDS DataSource.
The DataSchema defines the structure of the observation data in the
data file(s) referenced in the DataSource.
A DataSchema is not required if you specify a DataSchemaUri.
Define your DataSchema as a series of key-value pairs. attributes
and excludedVariableNames have an array of key-value pairs for their
value. Use the following format to define your DataSchema.
{ "version": "1.0",
"recordAnnotationFieldName": "F1",
"recordWeightFieldName": "F2",
"targetFieldName": "F3",
"dataFormat": "CSV",
"dataFileContainsHeader": true,
"attributes": [
{ "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],
"excludedVariableNames": [ "F6" ] }
rDSDataSpec_dataSchemaUri :: Lens' RDSDataSpec (Maybe Text) Source #
The Amazon S3 location of the DataSchema.
rDSDataSpec_databaseInformation :: Lens' RDSDataSpec RDSDatabase Source #
Describes the DatabaseName and InstanceIdentifier of an Amazon RDS
database.
rDSDataSpec_selectSqlQuery :: Lens' RDSDataSpec Text Source #
The query that is used to retrieve the observation data for the
DataSource.
rDSDataSpec_databaseCredentials :: Lens' RDSDataSpec RDSDatabaseCredentials Source #
The AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon RDS database.
rDSDataSpec_s3StagingLocation :: Lens' RDSDataSpec Text Source #
The Amazon S3 location for staging Amazon RDS data. The data retrieved
from Amazon RDS using SelectSqlQuery is stored in this location.
rDSDataSpec_resourceRole :: Lens' RDSDataSpec Text Source #
The role (DataPipelineDefaultResourceRole) assumed by an Amazon Elastic Compute Cloud (Amazon EC2) instance to carry out the copy operation from Amazon RDS to an Amazon S3 task. For more information, see Role templates for data pipelines.
rDSDataSpec_serviceRole :: Lens' RDSDataSpec Text Source #
The role (DataPipelineDefaultRole) assumed by AWS Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see Role templates for data pipelines.
rDSDataSpec_subnetId :: Lens' RDSDataSpec Text Source #
The subnet ID to be used to access a VPC-based RDS DB instance. This attribute is used by Data Pipeline to carry out the copy task from Amazon RDS to Amazon S3.
rDSDataSpec_securityGroupIds :: Lens' RDSDataSpec [Text] Source #
The security group IDs to be used to access a VPC-based RDS DB instance. Ensure that there are appropriate ingress rules set up to allow access to the RDS DB instance. This attribute is used by Data Pipeline to carry out the copy operation from Amazon RDS to an Amazon S3 task.
RDSDatabase
data RDSDatabase Source #
The database details of an Amazon RDS database.
See: newRDSDatabase smart constructor.
Constructors
| RDSDatabase' | |
Fields
| |
Instances
Arguments
| :: Text | |
| -> Text | |
| -> RDSDatabase |
Create a value of RDSDatabase with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:instanceIdentifier:RDSDatabase', rDSDatabase_instanceIdentifier - The ID of an RDS DB instance.
$sel:databaseName:RDSDatabase', rDSDatabase_databaseName - Undocumented member.
rDSDatabase_instanceIdentifier :: Lens' RDSDatabase Text Source #
The ID of an RDS DB instance.
rDSDatabase_databaseName :: Lens' RDSDatabase Text Source #
Undocumented member.
RDSDatabaseCredentials
data RDSDatabaseCredentials Source #
The database credentials to connect to a database on an RDS DB instance.
See: newRDSDatabaseCredentials smart constructor.
Constructors
| RDSDatabaseCredentials' | |
Instances
newRDSDatabaseCredentials Source #
Arguments
| :: Text | |
| -> Text | |
| -> RDSDatabaseCredentials |
Create a value of RDSDatabaseCredentials with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:username:RDSDatabaseCredentials', rDSDatabaseCredentials_username - Undocumented member.
$sel:password:RDSDatabaseCredentials', rDSDatabaseCredentials_password - Undocumented member.
rDSDatabaseCredentials_username :: Lens' RDSDatabaseCredentials Text Source #
Undocumented member.
rDSDatabaseCredentials_password :: Lens' RDSDatabaseCredentials Text Source #
Undocumented member.
RDSMetadata
data RDSMetadata Source #
The datasource details that are specific to Amazon RDS.
See: newRDSMetadata smart constructor.
Constructors
| RDSMetadata' | |
Fields
| |
Instances
newRDSMetadata :: RDSMetadata Source #
Create a value of RDSMetadata with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:dataPipelineId:RDSMetadata', rDSMetadata_dataPipelineId - The ID of the Data Pipeline instance that is used to copy data
from Amazon RDS to Amazon S3. You can use the ID to find details about
the instance in the Data Pipeline console.
$sel:database:RDSMetadata', rDSMetadata_database - The database details required to connect to an Amazon RDS database.
$sel:databaseUserName:RDSMetadata', rDSMetadata_databaseUserName - Undocumented member.
$sel:resourceRole:RDSMetadata', rDSMetadata_resourceRole - The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2
instance to carry out the copy task from Amazon RDS to Amazon S3. For
more information, see
Role templates
for data pipelines.
$sel:selectSqlQuery:RDSMetadata', rDSMetadata_selectSqlQuery - The SQL query that is supplied during CreateDataSourceFromRDS. Returns
only if Verbose is true in GetDataSourceInput.
$sel:serviceRole:RDSMetadata', rDSMetadata_serviceRole - The role (DataPipelineDefaultRole) assumed by the Data Pipeline service
to monitor the progress of the copy task from Amazon RDS to Amazon S3.
For more information, see
Role templates
for data pipelines.
rDSMetadata_dataPipelineId :: Lens' RDSMetadata (Maybe Text) Source #
The ID of the Data Pipeline instance that is used to copy data from Amazon RDS to Amazon S3. You can use the ID to find details about the instance in the Data Pipeline console.
rDSMetadata_database :: Lens' RDSMetadata (Maybe RDSDatabase) Source #
The database details required to connect to an Amazon RDS database.
rDSMetadata_databaseUserName :: Lens' RDSMetadata (Maybe Text) Source #
Undocumented member.
rDSMetadata_resourceRole :: Lens' RDSMetadata (Maybe Text) Source #
The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance to carry out the copy task from Amazon RDS to Amazon S3. For more information, see Role templates for data pipelines.
rDSMetadata_selectSqlQuery :: Lens' RDSMetadata (Maybe Text) Source #
The SQL query that is supplied during CreateDataSourceFromRDS. Returns
only if Verbose is true in GetDataSourceInput.
rDSMetadata_serviceRole :: Lens' RDSMetadata (Maybe Text) Source #
The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see Role templates for data pipelines.
RealtimeEndpointInfo
data RealtimeEndpointInfo Source #
Describes the real-time endpoint information for an MLModel.
See: newRealtimeEndpointInfo smart constructor.
Constructors
| RealtimeEndpointInfo' | |
Fields
| |
Instances
newRealtimeEndpointInfo :: RealtimeEndpointInfo Source #
Create a value of RealtimeEndpointInfo with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:createdAt:RealtimeEndpointInfo', realtimeEndpointInfo_createdAt - The time that the request to create the real-time endpoint for the
MLModel was received. The time is expressed in epoch time.
$sel:endpointStatus:RealtimeEndpointInfo', realtimeEndpointInfo_endpointStatus - The current status of the real-time endpoint for the MLModel. This
element can have one of the following values:
NONE- Endpoint does not exist or was previously deleted.READY- Endpoint is ready to be used for real-time predictions.UPDATING- Updating/creating the endpoint.
$sel:endpointUrl:RealtimeEndpointInfo', realtimeEndpointInfo_endpointUrl - The URI that specifies where to send real-time prediction requests for
the MLModel.
Note: The application must wait until the real-time endpoint is ready before using this URI.
$sel:peakRequestsPerSecond:RealtimeEndpointInfo', realtimeEndpointInfo_peakRequestsPerSecond - The maximum processing rate for the real-time endpoint for MLModel,
measured in incoming requests per second.
realtimeEndpointInfo_createdAt :: Lens' RealtimeEndpointInfo (Maybe UTCTime) Source #
The time that the request to create the real-time endpoint for the
MLModel was received. The time is expressed in epoch time.
realtimeEndpointInfo_endpointStatus :: Lens' RealtimeEndpointInfo (Maybe RealtimeEndpointStatus) Source #
The current status of the real-time endpoint for the MLModel. This
element can have one of the following values:
NONE- Endpoint does not exist or was previously deleted.READY- Endpoint is ready to be used for real-time predictions.UPDATING- Updating/creating the endpoint.
realtimeEndpointInfo_endpointUrl :: Lens' RealtimeEndpointInfo (Maybe Text) Source #
The URI that specifies where to send real-time prediction requests for
the MLModel.
Note: The application must wait until the real-time endpoint is ready before using this URI.
realtimeEndpointInfo_peakRequestsPerSecond :: Lens' RealtimeEndpointInfo (Maybe Int) Source #
The maximum processing rate for the real-time endpoint for MLModel,
measured in incoming requests per second.
RedshiftDataSpec
data RedshiftDataSpec Source #
Describes the data specification of an Amazon Redshift DataSource.
See: newRedshiftDataSpec smart constructor.
Constructors
| RedshiftDataSpec' | |
Fields
| |
Instances
Arguments
| :: RedshiftDatabase | |
| -> Text | |
| -> RedshiftDatabaseCredentials | |
| -> Text | |
| -> RedshiftDataSpec |
Create a value of RedshiftDataSpec with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:dataRearrangement:RedshiftDataSpec', redshiftDataSpec_dataRearrangement - A JSON string that represents the splitting and rearrangement processing
to be applied to a DataSource. If the DataRearrangement parameter is
not provided, all of the input data is used to create the Datasource.
There are multiple parameters that control what data is used to create a datasource:
percentBeginUse
percentBeginto indicate the beginning of the range of the data used to create the Datasource. If you do not includepercentBeginandpercentEnd, Amazon ML includes all of the data when creating the datasource.percentEndUse
percentEndto indicate the end of the range of the data used to create the Datasource. If you do not includepercentBeginandpercentEnd, Amazon ML includes all of the data when creating the datasource.complementThe
complementparameter instructs Amazon ML to use the data that is not included in the range ofpercentBegintopercentEndto create a datasource. Thecomplementparameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values forpercentBeginandpercentEnd, along with thecomplementparameter.For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.
Datasource for evaluation:
{"splitting":{"percentBegin":0, "percentEnd":25}}Datasource for training:
{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}strategyTo change how Amazon ML splits the data for a datasource, use the
strategyparameter.The default value for the
strategyparameter issequential, meaning that Amazon ML takes all of the data records between thepercentBeginandpercentEndparameters for the datasource, in the order that the records appear in the input data.The following two
DataRearrangementlines are examples of sequentially ordered training and evaluation datasources:Datasource for evaluation:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}Datasource for training:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the
strategyparameter torandomand provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number betweenpercentBeginandpercentEnd. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in the cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.The following two
DataRearrangementlines are examples of non-sequentially ordered training and evaluation datasources:Datasource for evaluation:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}}Datasource for training:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}
$sel:dataSchema:RedshiftDataSpec', redshiftDataSpec_dataSchema - A JSON string that represents the schema for an Amazon Redshift
DataSource. The DataSchema defines the structure of the observation
data in the data file(s) referenced in the DataSource.
A DataSchema is not required if you specify a DataSchemaUri.
Define your DataSchema as a series of key-value pairs. attributes
and excludedVariableNames have an array of key-value pairs for their
value. Use the following format to define your DataSchema.
{ "version": "1.0",
"recordAnnotationFieldName": "F1",
"recordWeightFieldName": "F2",
"targetFieldName": "F3",
"dataFormat": "CSV",
"dataFileContainsHeader": true,
"attributes": [
{ "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],
"excludedVariableNames": [ "F6" ] }
$sel:dataSchemaUri:RedshiftDataSpec', redshiftDataSpec_dataSchemaUri - Describes the schema location for an Amazon Redshift DataSource.
$sel:databaseInformation:RedshiftDataSpec', redshiftDataSpec_databaseInformation - Describes the DatabaseName and ClusterIdentifier for an Amazon
Redshift DataSource.
$sel:selectSqlQuery:RedshiftDataSpec', redshiftDataSpec_selectSqlQuery - Describes the SQL Query to execute on an Amazon Redshift database for an
Amazon Redshift DataSource.
$sel:databaseCredentials:RedshiftDataSpec', redshiftDataSpec_databaseCredentials - Describes AWS Identity and Access Management (IAM) credentials that are
used to connect to the Amazon Redshift database.
$sel:s3StagingLocation:RedshiftDataSpec', redshiftDataSpec_s3StagingLocation - Describes an Amazon S3 location to store the result set of the
SelectSqlQuery query.
redshiftDataSpec_dataRearrangement :: Lens' RedshiftDataSpec (Maybe Text) Source #
A JSON string that represents the splitting and rearrangement processing
to be applied to a DataSource. If the DataRearrangement parameter is
not provided, all of the input data is used to create the Datasource.
There are multiple parameters that control what data is used to create a datasource:
percentBeginUse
percentBeginto indicate the beginning of the range of the data used to create the Datasource. If you do not includepercentBeginandpercentEnd, Amazon ML includes all of the data when creating the datasource.percentEndUse
percentEndto indicate the end of the range of the data used to create the Datasource. If you do not includepercentBeginandpercentEnd, Amazon ML includes all of the data when creating the datasource.complementThe
complementparameter instructs Amazon ML to use the data that is not included in the range ofpercentBegintopercentEndto create a datasource. Thecomplementparameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values forpercentBeginandpercentEnd, along with thecomplementparameter.For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.
Datasource for evaluation:
{"splitting":{"percentBegin":0, "percentEnd":25}}Datasource for training:
{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}strategyTo change how Amazon ML splits the data for a datasource, use the
strategyparameter.The default value for the
strategyparameter issequential, meaning that Amazon ML takes all of the data records between thepercentBeginandpercentEndparameters for the datasource, in the order that the records appear in the input data.The following two
DataRearrangementlines are examples of sequentially ordered training and evaluation datasources:Datasource for evaluation:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}Datasource for training:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the
strategyparameter torandomand provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number betweenpercentBeginandpercentEnd. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in the cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.The following two
DataRearrangementlines are examples of non-sequentially ordered training and evaluation datasources:Datasource for evaluation:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}}Datasource for training:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}
redshiftDataSpec_dataSchema :: Lens' RedshiftDataSpec (Maybe Text) Source #
A JSON string that represents the schema for an Amazon Redshift
DataSource. The DataSchema defines the structure of the observation
data in the data file(s) referenced in the DataSource.
A DataSchema is not required if you specify a DataSchemaUri.
Define your DataSchema as a series of key-value pairs. attributes
and excludedVariableNames have an array of key-value pairs for their
value. Use the following format to define your DataSchema.
{ "version": "1.0",
"recordAnnotationFieldName": "F1",
"recordWeightFieldName": "F2",
"targetFieldName": "F3",
"dataFormat": "CSV",
"dataFileContainsHeader": true,
"attributes": [
{ "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],
"excludedVariableNames": [ "F6" ] }
redshiftDataSpec_dataSchemaUri :: Lens' RedshiftDataSpec (Maybe Text) Source #
Describes the schema location for an Amazon Redshift DataSource.
redshiftDataSpec_databaseInformation :: Lens' RedshiftDataSpec RedshiftDatabase Source #
Describes the DatabaseName and ClusterIdentifier for an Amazon
Redshift DataSource.
redshiftDataSpec_selectSqlQuery :: Lens' RedshiftDataSpec Text Source #
Describes the SQL Query to execute on an Amazon Redshift database for an
Amazon Redshift DataSource.
redshiftDataSpec_databaseCredentials :: Lens' RedshiftDataSpec RedshiftDatabaseCredentials Source #
Describes AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon Redshift database.
redshiftDataSpec_s3StagingLocation :: Lens' RedshiftDataSpec Text Source #
Describes an Amazon S3 location to store the result set of the
SelectSqlQuery query.
RedshiftDatabase
data RedshiftDatabase Source #
Describes the database details required to connect to an Amazon Redshift database.
See: newRedshiftDatabase smart constructor.
Constructors
| RedshiftDatabase' | |
Fields | |
Instances
Arguments
| :: Text | |
| -> Text | |
| -> RedshiftDatabase |
Create a value of RedshiftDatabase with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:databaseName:RedshiftDatabase', redshiftDatabase_databaseName - Undocumented member.
$sel:clusterIdentifier:RedshiftDatabase', redshiftDatabase_clusterIdentifier - Undocumented member.
redshiftDatabase_databaseName :: Lens' RedshiftDatabase Text Source #
Undocumented member.
redshiftDatabase_clusterIdentifier :: Lens' RedshiftDatabase Text Source #
Undocumented member.
RedshiftDatabaseCredentials
data RedshiftDatabaseCredentials Source #
Describes the database credentials for connecting to a database on an Amazon Redshift cluster.
See: newRedshiftDatabaseCredentials smart constructor.
Constructors
| RedshiftDatabaseCredentials' | |
Instances
newRedshiftDatabaseCredentials Source #
Arguments
| :: Text | |
| -> Text | |
| -> RedshiftDatabaseCredentials |
Create a value of RedshiftDatabaseCredentials with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:username:RedshiftDatabaseCredentials', redshiftDatabaseCredentials_username - Undocumented member.
$sel:password:RedshiftDatabaseCredentials', redshiftDatabaseCredentials_password - Undocumented member.
redshiftDatabaseCredentials_username :: Lens' RedshiftDatabaseCredentials Text Source #
Undocumented member.
redshiftDatabaseCredentials_password :: Lens' RedshiftDatabaseCredentials Text Source #
Undocumented member.
RedshiftMetadata
data RedshiftMetadata Source #
Describes the DataSource details specific to Amazon Redshift.
See: newRedshiftMetadata smart constructor.
Constructors
| RedshiftMetadata' | |
Fields
| |
Instances
newRedshiftMetadata :: RedshiftMetadata Source #
Create a value of RedshiftMetadata with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:databaseUserName:RedshiftMetadata', redshiftMetadata_databaseUserName - Undocumented member.
$sel:redshiftDatabase:RedshiftMetadata', redshiftMetadata_redshiftDatabase - Undocumented member.
$sel:selectSqlQuery:RedshiftMetadata', redshiftMetadata_selectSqlQuery - The SQL query that is specified during CreateDataSourceFromRedshift.
Returns only if Verbose is true in GetDataSourceInput.
redshiftMetadata_databaseUserName :: Lens' RedshiftMetadata (Maybe Text) Source #
Undocumented member.
redshiftMetadata_redshiftDatabase :: Lens' RedshiftMetadata (Maybe RedshiftDatabase) Source #
Undocumented member.
redshiftMetadata_selectSqlQuery :: Lens' RedshiftMetadata (Maybe Text) Source #
The SQL query that is specified during CreateDataSourceFromRedshift.
Returns only if Verbose is true in GetDataSourceInput.
S3DataSpec
data S3DataSpec Source #
Describes the data specification of a DataSource.
See: newS3DataSpec smart constructor.
Constructors
| S3DataSpec' | |
Fields
| |
Instances
Arguments
| :: Text | |
| -> S3DataSpec |
Create a value of S3DataSpec with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:dataRearrangement:S3DataSpec', s3DataSpec_dataRearrangement - A JSON string that represents the splitting and rearrangement processing
to be applied to a DataSource. If the DataRearrangement parameter is
not provided, all of the input data is used to create the Datasource.
There are multiple parameters that control what data is used to create a datasource:
percentBegin — Use percentBegin to indicate the beginning of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource.
percentEnd — Use percentEnd to indicate the end of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource.
complement — The complement parameter instructs Amazon ML to use the data that is not included in the range of percentBegin to percentEnd to create a datasource. The complement parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for percentBegin and percentEnd, along with the complement parameter.
For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.
Datasource for evaluation:
{"splitting":{"percentBegin":0, "percentEnd":25}}
Datasource for training:
{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}
strategy — To change how Amazon ML splits the data for a datasource, use the strategy parameter.
The default value for the strategy parameter is sequential, meaning that Amazon ML takes all of the data records between the percentBegin and percentEnd parameters for the datasource, in the order that the records appear in the input data.
The following two DataRearrangement lines are examples of sequentially ordered training and evaluation datasources:
Datasource for evaluation:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}
Datasource for training:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}
To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the strategy parameter to random and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between percentBegin and percentEnd. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in the cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.
The following two DataRearrangement lines are examples of non-sequentially ordered training and evaluation datasources:
Datasource for evaluation:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}}
Datasource for training:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}
$sel:dataSchema:S3DataSpec', s3DataSpec_dataSchema - A JSON string that represents the schema for an Amazon S3 DataSource.
The DataSchema defines the structure of the observation data in the
data file(s) referenced in the DataSource.
You must provide either the DataSchema or the DataSchemaLocationS3.
Define your DataSchema as a series of key-value pairs. attributes
and excludedVariableNames have an array of key-value pairs for their
value. Use the following format to define your DataSchema.
{ "version": "1.0",
"recordAnnotationFieldName": "F1",
"recordWeightFieldName": "F2",
"targetFieldName": "F3",
"dataFormat": "CSV",
"dataFileContainsHeader": true,
"attributes": [
{ "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],
"excludedVariableNames": [ "F6" ] }
$sel:dataSchemaLocationS3:S3DataSpec', s3DataSpec_dataSchemaLocationS3 - Describes the schema location in Amazon S3. You must provide either the
DataSchema or the DataSchemaLocationS3.
$sel:dataLocationS3:S3DataSpec', s3DataSpec_dataLocationS3 - The location of the data file(s) used by a DataSource. The URI
specifies a data file or an Amazon Simple Storage Service (Amazon S3)
directory or bucket containing data files.
s3DataSpec_dataRearrangement :: Lens' S3DataSpec (Maybe Text) Source #
A JSON string that represents the splitting and rearrangement processing
to be applied to a DataSource. If the DataRearrangement parameter is
not provided, all of the input data is used to create the Datasource.
There are multiple parameters that control what data is used to create a datasource:
percentBegin — Use percentBegin to indicate the beginning of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource.
percentEnd — Use percentEnd to indicate the end of the range of the data used to create the Datasource. If you do not include percentBegin and percentEnd, Amazon ML includes all of the data when creating the datasource.
complement — The complement parameter instructs Amazon ML to use the data that is not included in the range of percentBegin to percentEnd to create a datasource. The complement parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for percentBegin and percentEnd, along with the complement parameter.
For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data.
Datasource for evaluation:
{"splitting":{"percentBegin":0, "percentEnd":25}}
Datasource for training:
{"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}}
strategy — To change how Amazon ML splits the data for a datasource, use the strategy parameter.
The default value for the strategy parameter is sequential, meaning that Amazon ML takes all of the data records between the percentBegin and percentEnd parameters for the datasource, in the order that the records appear in the input data.
The following two DataRearrangement lines are examples of sequentially ordered training and evaluation datasources:
Datasource for evaluation:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential"}}
Datasource for training:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"sequential", "complement":"true"}}
To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the strategy parameter to random and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between percentBegin and percentEnd. Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in the cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records.
The following two DataRearrangement lines are examples of non-sequentially ordered training and evaluation datasources:
Datasource for evaluation:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv"}}
Datasource for training:
{"splitting":{"percentBegin":70, "percentEnd":100, "strategy":"random", "randomSeed":"s3://my_s3_path/bucket/file.csv", "complement":"true"}}
s3DataSpec_dataSchema :: Lens' S3DataSpec (Maybe Text) Source #
A JSON string that represents the schema for an Amazon S3 DataSource.
The DataSchema defines the structure of the observation data in the
data file(s) referenced in the DataSource.
You must provide either the DataSchema or the DataSchemaLocationS3.
Define your DataSchema as a series of key-value pairs. attributes
and excludedVariableNames have an array of key-value pairs for their
value. Use the following format to define your DataSchema.
{ "version": "1.0",
"recordAnnotationFieldName": "F1",
"recordWeightFieldName": "F2",
"targetFieldName": "F3",
"dataFormat": "CSV",
"dataFileContainsHeader": true,
"attributes": [
{ "fieldName": "F1", "fieldType": "TEXT" }, { "fieldName": "F2", "fieldType": "NUMERIC" }, { "fieldName": "F3", "fieldType": "CATEGORICAL" }, { "fieldName": "F4", "fieldType": "NUMERIC" }, { "fieldName": "F5", "fieldType": "CATEGORICAL" }, { "fieldName": "F6", "fieldType": "TEXT" }, { "fieldName": "F7", "fieldType": "WEIGHTED_INT_SEQUENCE" }, { "fieldName": "F8", "fieldType": "WEIGHTED_STRING_SEQUENCE" } ],
"excludedVariableNames": [ "F6" ] }
s3DataSpec_dataSchemaLocationS3 :: Lens' S3DataSpec (Maybe Text) Source #
Describes the schema location in Amazon S3. You must provide either the
DataSchema or the DataSchemaLocationS3.
s3DataSpec_dataLocationS3 :: Lens' S3DataSpec Text Source #
The location of the data file(s) used by a DataSource. The URI
specifies a data file or an Amazon Simple Storage Service (Amazon S3)
directory or bucket containing data files.
Tag
A custom key-value pair associated with an ML object, such as an ML model.
See: newTag smart constructor.
Constructors
| Tag' | |
Fields
| |
Instances
| FromJSON Tag Source # | |
| ToJSON Tag Source # | |
Defined in Amazonka.MachineLearning.Types.Tag | |
| Generic Tag Source # | |
| Read Tag Source # | |
| Show Tag Source # | |
| NFData Tag Source # | |
Defined in Amazonka.MachineLearning.Types.Tag | |
| Eq Tag Source # | |
| Hashable Tag Source # | |
Defined in Amazonka.MachineLearning.Types.Tag | |
| type Rep Tag Source # | |
Defined in Amazonka.MachineLearning.Types.Tag type Rep Tag = D1 ('MetaData "Tag" "Amazonka.MachineLearning.Types.Tag" "amazonka-ml-2.0-A3JLJ63WvmfHxGBBIqhdRA" 'False) (C1 ('MetaCons "Tag'" 'PrefixI 'True) (S1 ('MetaSel ('Just "key") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Text)) :*: S1 ('MetaSel ('Just "value") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Text)))) | |
Create a value of Tag with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:key:Tag', tag_key - A unique identifier for the tag. Valid characters include Unicode
letters, digits, white space, _, ., /, =, +, -, %, and @.
$sel:value:Tag', tag_value - An optional string, typically used to describe or define the tag. Valid
characters include Unicode letters, digits, white space, _, ., /, =, +,
-, %, and @.