diff --git a/src/AWSServices.jl b/src/AWSServices.jl index 722e35bf38..d5c488c8da 100644 --- a/src/AWSServices.jl +++ b/src/AWSServices.jl @@ -388,6 +388,9 @@ const kinesis_video_media = AWS.RestJSONService( const kinesis_video_signaling = AWS.RestJSONService( "kinesisvideo", "kinesisvideo", "2019-12-04" ) +const kinesis_video_webrtc_storage = AWS.RestJSONService( + "kinesisvideo", "kinesisvideo", "2018-05-10" +) const kms = AWS.JSONService("kms", "kms", "2014-11-01", "1.1", "TrentService") const lakeformation = AWS.RestJSONService("lakeformation", "lakeformation", "2017-03-31") const lambda = AWS.RestJSONService("lambda", "lambda", "2015-03-31") @@ -398,6 +401,11 @@ const lex_runtime_v2 = AWS.RestJSONService("lex", "runtime-v2-lex", "2020-08-07" const license_manager = AWS.JSONService( "license-manager", "license-manager", "2018-08-01", "1.1", "AWSLicenseManager" ) +const license_manager_linux_subscriptions = AWS.RestJSONService( + "license-manager-linux-subscriptions", + "license-manager-linux-subscriptions", + "2018-05-10", +) const license_manager_user_subscriptions = AWS.RestJSONService( "license-manager-user-subscriptions", "license-manager-user-subscriptions", "2018-05-10" ) diff --git a/src/services/amplifybackend.jl b/src/services/amplifybackend.jl index 4d558b02c4..c5b83c11aa 100644 --- a/src/services/amplifybackend.jl +++ b/src/services/amplifybackend.jl @@ -697,7 +697,7 @@ end get_backend_apimodels(app_id, backend_environment_name, resource_name) get_backend_apimodels(app_id, backend_environment_name, resource_name, params::Dict{String,<:Any}) -Generates a model schema for existing backend API resource. +Gets a model introspection schema for an existing backend API resource. # Arguments - `app_id`: The app ID. 
diff --git a/src/services/api_gateway.jl b/src/services/api_gateway.jl index 2041ad7c56..c20684e5f8 100644 --- a/src/services/api_gateway.jl +++ b/src/services/api_gateway.jl @@ -189,8 +189,8 @@ Creates a Deployment resource, which makes a specified RestApi callable over the Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"cacheClusterEnabled"`: Enables a cache cluster for the Stage resource specified in the input. -- `"cacheClusterSize"`: Specifies the cache cluster size for the Stage resource specified - in the input, if a cache cluster is enabled. +- `"cacheClusterSize"`: The stage's cache capacity in GB. For more information about + choosing a cache size, see Enabling API caching to enhance responsiveness. - `"canarySettings"`: The input configuration for the canary deployment when the deployment is a canary release deployment. - `"description"`: The description for the Deployment resource to create. @@ -592,7 +592,8 @@ Creates a new Stage resource that references a pre-existing Deployment for the A # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"cacheClusterEnabled"`: Whether cache clustering is enabled for the stage. -- `"cacheClusterSize"`: The stage's cache cluster size. +- `"cacheClusterSize"`: The stage's cache capacity in GB. For more information about + choosing a cache size, see Enabling API caching to enhance responsiveness. - `"canarySettings"`: The canary deployment settings of this stage. - `"description"`: The description of the Stage resource. - `"documentationVersion"`: The version of the associated API documentation. diff --git a/src/services/application_auto_scaling.jl b/src/services/application_auto_scaling.jl index 3c4e807e63..9457d14dc0 100644 --- a/src/services/application_auto_scaling.jl +++ b/src/services/application_auto_scaling.jl @@ -64,7 +64,7 @@ scaling policy in the Application Auto Scaling User Guide. 
provisioned write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. - sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an SageMaker model + sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service. comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference @@ -187,7 +187,7 @@ more information, see Delete a scheduled action in the Application Auto Scaling provisioned write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. - sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an SageMaker model + sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service. comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference @@ -314,7 +314,7 @@ with it. a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. sagemaker:variant:DesiredInstanceCount - The number of - EC2 instances for an SageMaker model endpoint variant. + EC2 instances for a SageMaker model endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service. 
comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference @@ -447,7 +447,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. sagemaker:variant:DesiredInstanceCount - The number of - EC2 instances for an SageMaker model endpoint variant. + EC2 instances for a SageMaker model endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service. comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference @@ -497,7 +497,8 @@ end Provides descriptive information about the scaling activities in the specified namespace from the previous six weeks. You can filter the results using ResourceId and -ScalableDimension. +ScalableDimension. For information about viewing scaling activities using the Amazon Web +Services CLI, see Scaling activities for Application Auto Scaling. # Arguments - `service_namespace`: The namespace of the Amazon Web Services service that provides the @@ -506,6 +507,11 @@ ScalableDimension. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"IncludeNotScaledActivities"`: Specifies whether to include activities that aren't + scaled (not scaled activities) in the response. Not scaled activities are activities that + aren't completed or started for various reasons, such as preventing infinite scaling loops. + For help interpreting the not scaled reason details in the response, see Scaling activities + for Application Auto Scaling. - `"MaxResults"`: The maximum number of scalable targets. This value can be between 1 and 50. The default value is 50. 
If this parameter is used, the operation returns up to MaxResults results at a time, along with a NextToken value. To get the next set of results, @@ -561,7 +567,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. sagemaker:variant:DesiredInstanceCount - The number of - EC2 instances for an SageMaker model endpoint variant. + EC2 instances for a SageMaker model endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service. comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference @@ -677,7 +683,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. sagemaker:variant:DesiredInstanceCount - The number of - EC2 instances for an SageMaker model endpoint variant. + EC2 instances for a SageMaker model endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service. comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference @@ -792,7 +798,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. sagemaker:variant:DesiredInstanceCount - The number of - EC2 instances for an SageMaker model endpoint variant. 
+ EC2 instances for a SageMaker model endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service. comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference @@ -864,7 +870,9 @@ deregistered, the scalable target is no longer available to execute scaling poli scaling policies that were specified for the scalable target are deleted. # Arguments -- `policy_name`: The name of the scaling policy. +- `policy_name`: The name of the scaling policy. You cannot change the name of a scaling + policy, but you can delete the original scaling policy and create a new scaling policy with + the same settings and a different name. - `resource_id`: The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: @@ -913,7 +921,7 @@ scaling policies that were specified for the scalable target are deleted. provisioned write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. - sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an SageMaker model + sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service. comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference @@ -935,12 +943,12 @@ scaling policies that were specified for the scalable target are deleted. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"PolicyType"`: The policy type. 
This parameter is required if you are creating a scaling - policy. The following policy types are supported: TargetTrackingScaling—Not supported - for Amazon EMR StepScaling—Not supported for DynamoDB, Amazon Comprehend, Lambda, Amazon - Keyspaces, Amazon MSK, Amazon ElastiCache, or Neptune. For more information, see Target - tracking scaling policies and Step scaling policies in the Application Auto Scaling User - Guide. +- `"PolicyType"`: The scaling policy type. This parameter is required if you are creating a + scaling policy. The following policy types are supported: TargetTrackingScaling—Not + supported for Amazon EMR StepScaling—Not supported for DynamoDB, Amazon Comprehend, + Lambda, Amazon Keyspaces, Amazon MSK, Amazon ElastiCache, or Neptune. For more information, + see Target tracking scaling policies and Step scaling policies in the Application Auto + Scaling User Guide. - `"StepScalingPolicyConfiguration"`: A step scaling policy. This parameter is required if you are creating a policy and the policy type is StepScaling. - `"TargetTrackingScalingPolicyConfiguration"`: A target tracking scaling policy. Includes @@ -1058,7 +1066,7 @@ scheduled actions that were specified for the scalable target are deleted. provisioned write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. - sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an SageMaker model + sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service. 
comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference @@ -1153,24 +1161,28 @@ end register_scalable_target(resource_id, scalable_dimension, service_namespace) register_scalable_target(resource_id, scalable_dimension, service_namespace, params::Dict{String,<:Any}) -Registers or updates a scalable target. A scalable target is a resource that Application -Auto Scaling can scale out and scale in. Scalable targets are uniquely identified by the -combination of resource ID, scalable dimension, and namespace. When you register a new -scalable target, you must specify values for minimum and maximum capacity. Current capacity -will be adjusted within the specified range when scaling starts. Application Auto Scaling -scaling policies will not scale capacity to values that are outside of this range. After -you register a scalable target, you do not need to register it again to use other -Application Auto Scaling operations. To see which resources have been registered, use -DescribeScalableTargets. You can also view the scaling policies for a service namespace by -using DescribeScalableTargets. If you no longer need a scalable target, you can deregister -it by using DeregisterScalableTarget. To update a scalable target, specify the parameters -that you want to change. Include the parameters that identify the scalable target: resource -ID, scalable dimension, and namespace. Any parameters that you don't specify are not -changed by this update request. If you call the RegisterScalableTarget API to update an -existing scalable target, Application Auto Scaling retrieves the current capacity of the -resource. If it is below the minimum capacity or above the maximum capacity, Application -Auto Scaling adjusts the capacity of the scalable target to place it within these bounds, -even if you don't include the MinCapacity or MaxCapacity request parameters. +Registers or updates a scalable target, the resource that you want to scale. 
Scalable +targets are uniquely identified by the combination of resource ID, scalable dimension, and +namespace, which represents some capacity dimension of the underlying service. When you +register a new scalable target, you must specify values for the minimum and maximum +capacity. If the specified resource is not active in the target service, this operation +does not change the resource's current capacity. Otherwise, it changes the resource's +current capacity to a value that is inside of this range. If you choose to add a scaling +policy, current capacity is adjustable within the specified range when scaling starts. +Application Auto Scaling scaling policies will not scale capacity to values that are +outside of the minimum and maximum range. After you register a scalable target, you do not +need to register it again to use other Application Auto Scaling operations. To see which +resources have been registered, use DescribeScalableTargets. You can also view the scaling +policies for a service namespace by using DescribeScalableTargets. If you no longer need a +scalable target, you can deregister it by using DeregisterScalableTarget. To update a +scalable target, specify the parameters that you want to change. Include the parameters +that identify the scalable target: resource ID, scalable dimension, and namespace. Any +parameters that you don't specify are not changed by this update request. If you call the +RegisterScalableTarget API to update an existing scalable target, Application Auto Scaling +retrieves the current capacity of the resource. If it is below the minimum capacity or +above the maximum capacity, Application Auto Scaling adjusts the capacity of the scalable +target to place it within these bounds, even if you don't include the MinCapacity or +MaxCapacity request parameters. 
# Arguments - `resource_id`: The identifier of the resource that is associated with the scalable @@ -1222,7 +1234,7 @@ even if you don't include the MinCapacity or MaxCapacity request parameters. a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. sagemaker:variant:DesiredInstanceCount - The number of - EC2 instances for an SageMaker model endpoint variant. + EC2 instances for a SageMaker model endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a custom resource provided by your own application or service. comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference @@ -1251,14 +1263,20 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys quotas may impose lower limits. Each service has its own default quotas for the maximum capacity of the resource. If you want to specify a higher limit, you can request an increase. For more information, consult the documentation for that service. For information - about the default quotas for each service, see Service Endpoints and Quotas in the Amazon + about the default quotas for each service, see Service endpoints and quotas in the Amazon Web Services General Reference. - `"MinCapacity"`: The minimum value that you plan to scale in to. When a scaling policy is in effect, Application Auto Scaling can scale in (contract) as needed to the minimum capacity limit in response to changing demand. This property is required when registering a - new scalable target. For certain resources, the minimum value allowed is 0. This includes - Lambda provisioned concurrency, Spot Fleet, ECS services, Aurora DB clusters, EMR clusters, - and custom resources. For all other resources, the minimum value allowed is 1. + new scalable target. For the following resources, the minimum value allowed is 0. 
+ AppStream 2.0 fleets Aurora DB clusters ECS services EMR clusters Lambda + provisioned concurrency SageMaker endpoint variants Spot Fleets custom resources + It's strongly recommended that you specify a value greater than 0. A value greater than 0 + means that data points are continuously reported to CloudWatch that scaling policies can + use to scale on a metric like average CPU utilization. For all other resources, the minimum + allowed value depends on the type of resource that you are using. If you provide a value + that is lower than what a resource can accept, an error occurs. In which case, the error + message will provide the minimum value that the resource can accept. - `"RoleARN"`: This parameter is required for services that do not support service-linked roles (such as Amazon EMR), and it must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf. If the service diff --git a/src/services/athena.jl b/src/services/athena.jl index c0e0bd941c..36689ead78 100644 --- a/src/services/athena.jl +++ b/src/services/athena.jl @@ -1055,8 +1055,10 @@ end get_query_runtime_statistics(query_execution_id, params::Dict{String,<:Any}) Returns query execution runtime statistics related to a single execution of a query if you -have access to the workgroup in which the query ran. The query execution runtime statistics -is returned only when QueryExecutionStatusState is in a SUCCEEDED or FAILED state. +have access to the workgroup in which the query ran. Query execution runtime statistics are +returned only when QueryExecutionStatusState is in a SUCCEEDED or FAILED state. Stage-level +input and output row count and data size statistics are not shown when a query has +row-level filters defined in Lake Formation. # Arguments - `query_execution_id`: The unique ID of the query execution. 
@@ -2368,13 +2370,15 @@ function update_named_query( end """ - update_notebook(notebook_id) - update_notebook(notebook_id, params::Dict{String,<:Any}) + update_notebook(notebook_id, payload, type) + update_notebook(notebook_id, payload, type, params::Dict{String,<:Any}) Updates the contents of a Spark notebook. # Arguments - `notebook_id`: The ID of the notebook to update. +- `payload`: The updated content for the notebook. +- `type`: The notebook content type. Currently, the only valid type is IPYNB. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2383,27 +2387,35 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys required because Amazon Web Services SDKs (for example the Amazon Web Services SDK for Java) auto-generate the token for you. If you are not using the Amazon Web Services SDK or the Amazon Web Services CLI, you must provide this token or the action will fail. -- `"Payload"`: The updated content for the notebook. - `"SessionId"`: The ID of the session in which the notebook will be updated. -- `"Type"`: The notebook content type. Currently, the only valid type is IPYNB. 
""" -function update_notebook(NotebookId; aws_config::AbstractAWSConfig=global_aws_config()) +function update_notebook( + NotebookId, Payload, Type; aws_config::AbstractAWSConfig=global_aws_config() +) return athena( "UpdateNotebook", - Dict{String,Any}("NotebookId" => NotebookId); + Dict{String,Any}("NotebookId" => NotebookId, "Payload" => Payload, "Type" => Type); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function update_notebook( NotebookId, + Payload, + Type, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return athena( "UpdateNotebook", Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("NotebookId" => NotebookId), params) + mergewith( + _merge, + Dict{String,Any}( + "NotebookId" => NotebookId, "Payload" => Payload, "Type" => Type + ), + params, + ), ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, diff --git a/src/services/cloudfront.jl b/src/services/cloudfront.jl index 4f2d44e39a..ca1baf1d79 100644 --- a/src/services/cloudfront.jl +++ b/src/services/cloudfront.jl @@ -846,16 +846,18 @@ end create_response_headers_policy2020_05_31(response_headers_policy_config, params::Dict{String,<:Any}) Creates a response headers policy. A response headers policy contains information about a -set of HTTP response headers and their values. To create a response headers policy, you -provide some metadata about the policy, and a set of configurations that specify the -response headers. After you create a response headers policy, you can use its ID to attach -it to one or more cache behaviors in a CloudFront distribution. When it's attached to a -cache behavior, CloudFront adds the headers in the policy to HTTP responses that it sends -for requests that match the cache behavior. +set of HTTP headers. To create a response headers policy, you provide some metadata about +the policy and a set of configurations that specify the headers. 
After you create a +response headers policy, you can use its ID to attach it to one or more cache behaviors in +a CloudFront distribution. When it's attached to a cache behavior, the response headers +policy affects the HTTP headers that CloudFront includes in HTTP responses to requests that +match the cache behavior. CloudFront adds or removes response headers according to the +configuration of the response headers policy. For more information, see Adding or removing +HTTP headers in CloudFront responses in the Amazon CloudFront Developer Guide. # Arguments - `response_headers_policy_config`: Contains metadata about the response headers policy, - and a set of configurations that specify the response headers. + and a set of configurations that specify the HTTP headers. """ function create_response_headers_policy2020_05_31( diff --git a/src/services/cloudwatch_logs.jl b/src/services/cloudwatch_logs.jl index 7b1500a267..12f5cf35e2 100644 --- a/src/services/cloudwatch_logs.jl +++ b/src/services/cloudwatch_logs.jl @@ -1553,10 +1553,7 @@ end Creates or updates an access policy associated with an existing destination. An access policy is an IAM policy document that is used to authorize claims to register a -subscription filter against a given destination. If multiple Amazon Web Services accounts -are sending logs to this destination, each sender account must be listed separately in the -policy. The policy does not support specifying * as the Principal or the use of the -aws:PrincipalOrgId global key. +subscription filter against a given destination. # Arguments - `access_policy`: An IAM policy document that authorizes cross-account users to deliver @@ -1612,27 +1609,26 @@ end put_log_events(log_events, log_group_name, log_stream_name) put_log_events(log_events, log_group_name, log_stream_name, params::Dict{String,<:Any}) -Uploads a batch of log events to the specified log stream. You must include the sequence -token obtained from the response of the previous call. 
An upload in a newly created log -stream does not require a sequence token. You can also get the sequence token in the -expectedSequenceToken field from InvalidSequenceTokenException. If you call PutLogEvents -twice within a narrow time period using the same value for sequenceToken, both calls might -be successful or one might be rejected. The batch of events must satisfy the following -constraints: The maximum batch size is 1,048,576 bytes. This size is calculated as the -sum of all event messages in UTF-8, plus 26 bytes for each log event. None of the log -events in the batch can be more than 2 hours in the future. None of the log events in the -batch can be more than 14 days in the past. Also, none of the log events can be from -earlier than the retention period of the log group. The log events in the batch must be -in chronological order by their timestamp. The timestamp is the time that the event -occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. (In -Amazon Web Services Tools for PowerShell and the Amazon Web Services SDK for .NET, the -timestamp is specified in .NET format: yyyy-mm-ddThh:mm:ss. For example, +Uploads a batch of log events to the specified log stream. The sequence token is now +ignored in PutLogEvents actions. PutLogEvents actions are always accepted and never return +InvalidSequenceTokenException or DataAlreadyAcceptedException even if the sequence token is +not valid. You can use parallel PutLogEvents actions on the same log stream. The batch of +events must satisfy the following constraints: The maximum batch size is 1,048,576 bytes. +This size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each +log event. None of the log events in the batch can be more than 2 hours in the future. +None of the log events in the batch can be more than 14 days in the past. Also, none of the +log events can be from earlier than the retention period of the log group. 
The log events +in the batch must be in chronological order by their timestamp. The timestamp is the time +that the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 +UTC. (In Amazon Web Services Tools for PowerShell and the Amazon Web Services SDK for .NET, +the timestamp is specified in .NET format: yyyy-mm-ddThh:mm:ss. For example, 2017-09-15T13:45:30.) A batch of log events in a single request cannot span more than 24 hours. Otherwise, the operation fails. The maximum number of log events in a batch is -10,000. There is a quota of five requests per second per log stream. Additional requests -are throttled. This quota can't be changed. If a call to PutLogEvents returns -\"UnrecognizedClientException\" the most likely cause is a non-valid Amazon Web Services -access key ID or secret key. +10,000. The quota of five requests per second per log stream has been removed. Instead, +PutLogEvents actions are throttled based on a per-second per-account quota. You can request +an increase to the per-second throttling quota by using the Service Quotas service. If a +call to PutLogEvents returns \"UnrecognizedClientException\" the most likely cause is a +non-valid Amazon Web Services access key ID or secret key. # Arguments - `log_events`: The log events. @@ -1642,10 +1638,9 @@ access key ID or secret key. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"sequenceToken"`: The sequence token obtained from the response of the previous - PutLogEvents call. An upload in a newly created log stream does not require a sequence - token. You can also get the sequence token using DescribeLogStreams. If you call - PutLogEvents twice within a narrow time period using the same value for sequenceToken, both - calls might be successful or one might be rejected. + PutLogEvents call. The sequenceToken parameter is now ignored in PutLogEvents actions. 
+ PutLogEvents actions are now accepted and never return InvalidSequenceTokenException or + DataAlreadyAcceptedException even if the sequence token is not valid. """ function put_log_events( logEvents, diff --git a/src/services/compute_optimizer.jl b/src/services/compute_optimizer.jl index ac07abe977..85a9046d54 100644 --- a/src/services/compute_optimizer.jl +++ b/src/services/compute_optimizer.jl @@ -336,6 +336,74 @@ function export_ec2_instance_recommendations( ) end +""" + export_ecsservice_recommendations(s3_destination_config) + export_ecsservice_recommendations(s3_destination_config, params::Dict{String,<:Any}) + + Exports optimization recommendations for Amazon ECS services on Fargate. Recommendations +are exported in a CSV file, and its metadata in a JSON file, to an existing Amazon Simple +Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting +Recommendations in the Compute Optimizer User Guide. You can only have one Amazon ECS +service export job in progress per Amazon Web Services Region. + +# Arguments +- `s3_destination_config`: + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountIds"`: The Amazon Web Services account IDs for the export ECS service + recommendations. If your account is the management account or the delegated administrator + of an organization, use this parameter to specify the member account you want to export + recommendations to. This parameter can't be specified together with the include member + accounts parameter. The parameters are mutually exclusive. If this parameter or the include + member accounts parameter is omitted, the recommendations for member accounts aren't + included in the export. You can specify multiple account IDs per request. +- `"fieldsToExport"`: The recommendations data to include in the export file. 
For more + information about the fields that can be exported, see Exported files in the Compute + Optimizer User Guide. +- `"fileFormat"`: The format of the export file. The CSV file is the only export file + format currently supported. +- `"filters"`: An array of objects to specify a filter that exports a more specific set of + ECS service recommendations. +- `"includeMemberAccounts"`: If your account is the management account or the delegated + administrator of an organization, this parameter indicates whether to include + recommendations for resources in all member accounts of the organization. The member + accounts must also be opted in to Compute Optimizer, and trusted access for Compute + Optimizer must be enabled in the organization account. For more information, see Compute + Optimizer and Amazon Web Services Organizations trusted access in the Compute Optimizer + User Guide. If this parameter is omitted, recommendations for member accounts of the + organization aren't included in the export file. If this parameter or the account ID + parameter is omitted, recommendations for member accounts aren't included in the export. 
+""" +function export_ecsservice_recommendations( + s3DestinationConfig; aws_config::AbstractAWSConfig=global_aws_config() +) + return compute_optimizer( + "ExportECSServiceRecommendations", + Dict{String,Any}("s3DestinationConfig" => s3DestinationConfig); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function export_ecsservice_recommendations( + s3DestinationConfig, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return compute_optimizer( + "ExportECSServiceRecommendations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("s3DestinationConfig" => s3DestinationConfig), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ export_lambda_function_recommendations(s3_destination_config) export_lambda_function_recommendations(s3_destination_config, params::Dict{String,<:Any}) @@ -614,6 +682,114 @@ function get_ec2_recommendation_projected_metrics( ) end +""" + get_ecsservice_recommendation_projected_metrics(end_time, period, service_arn, start_time, stat) + get_ecsservice_recommendation_projected_metrics(end_time, period, service_arn, start_time, stat, params::Dict{String,<:Any}) + + Returns the projected metrics of Amazon ECS service recommendations. + +# Arguments +- `end_time`: The timestamp of the last projected metrics data point to return. +- `period`: The granularity, in seconds, of the projected metrics data points. +- `service_arn`: The ARN that identifies the ECS service. The following is the format of + the ARN: arn:aws:ecs:region:aws_account_id:service/cluster-name/service-name +- `start_time`: The timestamp of the first projected metrics data point to return. +- `stat`: The statistic of the projected metrics. 
+ +""" +function get_ecsservice_recommendation_projected_metrics( + endTime, + period, + serviceArn, + startTime, + stat; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return compute_optimizer( + "GetECSServiceRecommendationProjectedMetrics", + Dict{String,Any}( + "endTime" => endTime, + "period" => period, + "serviceArn" => serviceArn, + "startTime" => startTime, + "stat" => stat, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_ecsservice_recommendation_projected_metrics( + endTime, + period, + serviceArn, + startTime, + stat, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return compute_optimizer( + "GetECSServiceRecommendationProjectedMetrics", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "endTime" => endTime, + "period" => period, + "serviceArn" => serviceArn, + "startTime" => startTime, + "stat" => stat, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_ecsservice_recommendations() + get_ecsservice_recommendations(params::Dict{String,<:Any}) + + Returns Amazon ECS service recommendations. Compute Optimizer generates recommendations +for Amazon ECS services on Fargate that meet a specific set of requirements. For more +information, see the Supported resources and requirements in the Compute Optimizer User +Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"accountIds"`: Return the ECS service recommendations to the specified Amazon Web + Services account IDs. If your account is the management account or the delegated + administrator of an organization, use this parameter to return the ECS service + recommendations to specific member accounts. You can only specify one account ID per + request. +- `"filters"`: An array of objects to specify a filter that returns a more specific list + of ECS service recommendations. 
+- `"maxResults"`: The maximum number of ECS service recommendations to return with a + single request. To retrieve the remaining results, make another request with the returned + nextToken value. +- `"nextToken"`: The token to advance to the next page of ECS service recommendations. +- `"serviceArns"`: The ARN that identifies the ECS service. The following is the format + of the ARN: arn:aws:ecs:region:aws_account_id:service/cluster-name/service-name +""" +function get_ecsservice_recommendations(; aws_config::AbstractAWSConfig=global_aws_config()) + return compute_optimizer( + "GetECSServiceRecommendations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_ecsservice_recommendations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return compute_optimizer( + "GetECSServiceRecommendations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_effective_recommendation_preferences(resource_arn) get_effective_recommendation_preferences(resource_arn, params::Dict{String,<:Any}) @@ -829,7 +1005,8 @@ Returns the optimization findings for an account. It returns the number of: Am instances in an account that are Underprovisioned, Overprovisioned, or Optimized. Auto Scaling groups in an account that are NotOptimized, or Optimized. Amazon EBS volumes in an account that are NotOptimized, or Optimized. Lambda functions in an account that are -NotOptimized, or Optimized. +NotOptimized, or Optimized. Amazon ECS services in an account that are Underprovisioned, +Overprovisioned, or Optimized. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: diff --git a/src/services/connect.jl b/src/services/connect.jl index 2039b52943..f87ee78a19 100644 --- a/src/services/connect.jl +++ b/src/services/connect.jl @@ -3329,10 +3329,13 @@ Administrator Guide. \"Value\": 24113.0 } The actual OLDEST_CONTACT_AGE is 24 seconds. 
Name in real-time metrics report: Oldest SLOTS_ACTIVE Unit: COUNT Name in real-time metrics report: Active SLOTS_AVAILABLE Unit: COUNT Name in real-time metrics report: Availability -- `filters`: The queues, up to 100, or channels, to use to filter the metrics returned. - Metric data is retrieved only for the resources associated with the queues or channels - included in the filter. You can include both queue IDs and queue ARNs in the same request. - VOICE, CHAT, and TASK channels are supported. +- `filters`: The filters to apply to returned metrics. You can filter up to the following + limits: Queues: 100 Routing profiles: 100 Channels: 3 (VOICE, CHAT, and TASK channels + are supported.) Metric data is retrieved only for the resources associated with the + queues or routing profiles, and by any channels included in the filter. (You cannot filter + by both queue AND routing profile.) You can include both resource IDs and resource ARNs in + the same request. Currently tagging is only supported on the resources that are passed in + the filter. - `instance_id`: The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance. @@ -3342,13 +3345,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys QUEUE, the metrics returned apply to each queue rather than aggregated for all queues. If you group by CHANNEL, you should include a Channels filter. VOICE, CHAT, and TASK channels are supported. If you group by ROUTING_PROFILE, you must include either a queue - or routing profile filter. If no Grouping is included in the request, a summary of - metrics is returned. + or routing profile filter. In addition, a routing profile filter is required for metrics + CONTACTS_SCHEDULED, CONTACTS_IN_QUEUE, and OLDEST_CONTACT_AGE. If no Grouping is + included in the request, a summary of metrics is returned. - `"MaxResults"`: The maximum number of results to return per page. 
- `"NextToken"`: The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. The token expires after 5 minutes from the time it is created. Subsequent requests that use the token must use the same request parameters as the request that generated the token. +- `"SortCriteria"`: The way to sort the resulting response based on metrics. You can enter + one sort criteria. By default resources are sorted based on AGENTS_ONLINE, DESCENDING. The + metric collection is sorted based on the input metrics. Note the following: Sorting on + SLOTS_ACTIVE and SLOTS_AVAILABLE is not supported. """ function get_current_metric_data( CurrentMetrics, Filters, InstanceId; aws_config::AbstractAWSConfig=global_aws_config() @@ -3390,9 +3398,12 @@ end Gets the real-time active user data from the specified Amazon Connect instance. # Arguments -- `filters`: Filters up to 100 Queues, or up to 9 ContactStates. The user data is retrieved - only for those users who are associated with the queues and have contacts that are in the - specified ContactState. +- `filters`: The filters to apply to returned user data. You can filter up to the following + limits: Queues: 100 Routing profiles: 100 Agents: 100 Contact states: 9 User + hierarchy groups: 1 The user data is retrieved for only the specified values/resources + in the filter. A maximum of one filter can be passed from queues, routing profiles, agents, + and user hierarchy groups. Currently tagging is only supported on the resources that are + passed in the filter. - `instance_id`: The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance. @@ -5587,7 +5598,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys field. For more information about idempotency, see Making retries safe with idempotent APIs. - `"InitialMessage"`: The initial message to be sent to the newly created chat. 
- `"SupportedMessagingContentTypes"`: The supported chat message content types. Content - types can be text/plain or both text/plain and text/markdown. + types must always contain text/plain. You can then put any other supported type in the + list. For example, all the following lists are valid because they contain text/plain: + [text/plain, text/markdown, application/json], [text/markdown, text/plain], [text/plain, + application/json]. """ function start_chat_contact( ContactFlowId, @@ -6893,6 +6907,59 @@ function update_instance_storage_config( ) end +""" + update_participant_role_config(channel_configuration, contact_id, instance_id) + update_participant_role_config(channel_configuration, contact_id, instance_id, params::Dict{String,<:Any}) + +Updates timeouts for when human chat participants are to be considered idle, and when +agents are automatically disconnected from a chat due to idleness. You can set four timers: + Customer idle timeout Customer auto-disconnect timeout Agent idle timeout Agent +auto-disconnect timeout For more information about how chat timeouts work, see Set up +chat timeouts for human participants. + +# Arguments +- `channel_configuration`: The Amazon Connect channel you want to configure. +- `contact_id`: The identifier of the contact in this instance of Amazon Connect. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instanceId + in the ARN of the instance. 
+ +""" +function update_participant_role_config( + ChannelConfiguration, + ContactId, + InstanceId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "PUT", + "/contact/participant-role-config/$(InstanceId)/$(ContactId)", + Dict{String,Any}("ChannelConfiguration" => ChannelConfiguration); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_participant_role_config( + ChannelConfiguration, + ContactId, + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "PUT", + "/contact/participant-role-config/$(InstanceId)/$(ContactId)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ChannelConfiguration" => ChannelConfiguration), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_phone_number(phone_number_id, target_arn) update_phone_number(phone_number_id, target_arn, params::Dict{String,<:Any}) diff --git a/src/services/connectparticipant.jl b/src/services/connectparticipant.jl index 5b97e97316..8d2971df6c 100644 --- a/src/services/connectparticipant.jl +++ b/src/services/connectparticipant.jl @@ -9,13 +9,15 @@ using AWS.UUIDs complete_attachment_upload(attachment_ids, client_token, x-_amz-_bearer, params::Dict{String,<:Any}) Allows you to confirm that the attachment has been uploaded using the pre-signed URL -provided in StartAttachmentUpload API. The Amazon Connect Participant Service APIs do not -use Signature Version 4 authentication. +provided in StartAttachmentUpload API. ConnectionToken is used for invoking this API +instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use +Signature Version 4 authentication. # Arguments - `attachment_ids`: A list of unique identifiers for the attachments. - `client_token`: A unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. + idempotency of the request. 
If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. - `x-_amz-_bearer`: The authentication token associated with the participant's connection. """ @@ -64,11 +66,11 @@ function complete_attachment_upload( end """ - create_participant_connection(type, x-_amz-_bearer) - create_participant_connection(type, x-_amz-_bearer, params::Dict{String,<:Any}) + create_participant_connection(x-_amz-_bearer) + create_participant_connection(x-_amz-_bearer, params::Dict{String,<:Any}) -Creates the participant's connection. Note that ParticipantToken is used for invoking this -API instead of ConnectionToken. The participant token is valid for the lifetime of the +Creates the participant's connection. ParticipantToken is used for invoking this API +instead of ConnectionToken. The participant token is valid for the lifetime of the participant – until they are part of a contact. The response URL for WEBSOCKET Type has a connect expiry timeout of 100s. Clients must manually connect to the returned websocket URL and subscribe to the desired topic. For chat, you need to publish the following on the @@ -85,7 +87,6 @@ see Feature specifications in the Amazon Connect Administrator Guide. The Amaz Participant Service APIs do not use Signature Version 4 authentication. # Arguments -- `type`: Type of connection information required. - `x-_amz-_bearer`: This is a header parameter. The ParticipantToken as obtained from StartChatContact API response. @@ -93,22 +94,21 @@ Participant Service APIs do not use Signature Version 4 authentication. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ConnectParticipant"`: Amazon Connect Participant is used to mark the participant as connected for message streaming. +- `"Type"`: Type of connection information required. This can be omitted if + ConnectParticipant is true. 
""" function create_participant_connection( - Type, X_Amz_Bearer; aws_config::AbstractAWSConfig=global_aws_config() + X_Amz_Bearer; aws_config::AbstractAWSConfig=global_aws_config() ) return connectparticipant( "POST", "/participant/connection", - Dict{String,Any}( - "Type" => Type, "headers" => Dict{String,Any}("X-Amz-Bearer" => X_Amz_Bearer) - ); + Dict{String,Any}("headers" => Dict{String,Any}("X-Amz-Bearer" => X_Amz_Bearer)); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function create_participant_connection( - Type, X_Amz_Bearer, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -120,8 +120,7 @@ function create_participant_connection( mergewith( _merge, Dict{String,Any}( - "Type" => Type, - "headers" => Dict{String,Any}("X-Amz-Bearer" => X_Amz_Bearer), + "headers" => Dict{String,Any}("X-Amz-Bearer" => X_Amz_Bearer) ), params, ), @@ -135,9 +134,9 @@ end disconnect_participant(x-_amz-_bearer) disconnect_participant(x-_amz-_bearer, params::Dict{String,<:Any}) -Disconnects a participant. Note that ConnectionToken is used for invoking this API instead -of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature -Version 4 authentication. +Disconnects a participant. ConnectionToken is used for invoking this API instead of +ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version +4 authentication. # Arguments - `x-_amz-_bearer`: The authentication token associated with the participant's connection. @@ -145,7 +144,8 @@ Version 4 authentication. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. 
""" function disconnect_participant( X_Amz_Bearer; aws_config::AbstractAWSConfig=global_aws_config() @@ -189,8 +189,9 @@ end get_attachment(attachment_id, x-_amz-_bearer, params::Dict{String,<:Any}) Provides a pre-signed URL for download of a completed attachment. This is an asynchronous -API for use with active contacts. The Amazon Connect Participant Service APIs do not use -Signature Version 4 authentication. +API for use with active contacts. ConnectionToken is used for invoking this API instead +of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature +Version 4 authentication. # Arguments - `attachment_id`: A unique identifier for the attachment. @@ -239,8 +240,8 @@ end get_transcript(x-_amz-_bearer) get_transcript(x-_amz-_bearer, params::Dict{String,<:Any}) -Retrieves a transcript of the session, including details about any attachments. Note that -ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon +Retrieves a transcript of the session, including details about any attachments. +ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. # Arguments @@ -293,22 +294,26 @@ end send_event(content_type, x-_amz-_bearer) send_event(content_type, x-_amz-_bearer, params::Dict{String,<:Any}) -Sends an event. Note that ConnectionToken is used for invoking this API instead of -ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version +Sends an event. ConnectionToken is used for invoking this API instead of +ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. # Arguments - `content_type`: The content type of the request. 
Supported types are: application/vnd.amazonaws.connect.event.typing application/vnd.amazonaws.connect.event.connection.acknowledged + application/vnd.amazonaws.connect.event.message.delivered + application/vnd.amazonaws.connect.event.message.read - `x-_amz-_bearer`: The authentication token associated with the participant's connection. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. -- `"Content"`: The content of the event to be sent (for example, message text). This is not - yet supported. + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. +- `"Content"`: The content of the event to be sent (for example, message text). For content + related to message receipts, this is supported in the form of a JSON string. Sample + Content: \"{\"messageId\":\"11111111-aaaa-bbbb-cccc-EXAMPLE01234\"}\" """ function send_event( ContentType, X_Amz_Bearer; aws_config::AbstractAWSConfig=global_aws_config() @@ -354,19 +359,23 @@ end send_message(content, content_type, x-_amz-_bearer) send_message(content, content_type, x-_amz-_bearer, params::Dict{String,<:Any}) -Sends a message. Note that ConnectionToken is used for invoking this API instead of -ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version +Sends a message. ConnectionToken is used for invoking this API instead of +ParticipantToken. The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. # Arguments -- `content`: The content of the message. -- `content_type`: The type of the content. Supported types are text/plain. +- `content`: The content of the message. For text/plain and text/markdown, the Length + Constraints are Minimum of 1, Maximum of 1024. 
For application/json, the Length + Constraints are Minimum of 1, Maximum of 12000. +- `content_type`: The type of the content. Supported types are text/plain, text/markdown, + and application/json. - `x-_amz-_bearer`: The authentication token associated with the connection. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. """ function send_message( Content, ContentType, X_Amz_Bearer; aws_config::AbstractAWSConfig=global_aws_config() @@ -415,13 +424,16 @@ end start_attachment_upload(attachment_name, attachment_size_in_bytes, client_token, content_type, x-_amz-_bearer) start_attachment_upload(attachment_name, attachment_size_in_bytes, client_token, content_type, x-_amz-_bearer, params::Dict{String,<:Any}) -Provides a pre-signed Amazon S3 URL in response for uploading the file directly to S3. The -Amazon Connect Participant Service APIs do not use Signature Version 4 authentication. +Provides a pre-signed Amazon S3 URL in response for uploading the file directly to S3. +ConnectionToken is used for invoking this API instead of ParticipantToken. The Amazon +Connect Participant Service APIs do not use Signature Version 4 authentication. # Arguments - `attachment_name`: A case-sensitive name of the attachment being uploaded. - `attachment_size_in_bytes`: The size of the attachment in bytes. -- `client_token`: A unique case sensitive identifier to support idempotency of request. +- `client_token`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. 
For more information about idempotency, see Making retries safe with idempotent APIs. - `content_type`: Describes the MIME file type of the attachment. For a list of supported file types, see Feature specifications in the Amazon Connect Administrator Guide. - `x-_amz-_bearer`: The authentication token associated with the participant's connection. diff --git a/src/services/ec2.jl b/src/services/ec2.jl index d203aca1e0..b42afbcf18 100644 --- a/src/services/ec2.jl +++ b/src/services/ec2.jl @@ -2263,7 +2263,7 @@ end Removes your Amazon Web Services account from the launch permissions for the specified AMI. For more information, see Cancel having an AMI shared with your Amazon Web Services account -in the Amazon Elastic Compute Cloud User Guide. +in the Amazon EC2 User Guide. # Arguments - `image_id`: The ID of the AMI that was shared with your Amazon Web Services account. @@ -2586,9 +2586,8 @@ specify the ARN of the destination Outpost using DestinationOutpostArn. Backing copied to an Outpost are encrypted by default using the default encryption key for the Region, or a different key that you specify in the request using KmsKeyId. Outposts do not support unencrypted snapshots. For more information, Amazon EBS local snapshots on -Outposts in the Amazon Elastic Compute Cloud User Guide. For more information about the -prerequisites and limits when copying an AMI, see Copy an AMI in the Amazon Elastic Compute -Cloud User Guide. +Outposts in the Amazon EC2 User Guide. For more information about the prerequisites and +limits when copying an AMI, see Copy an AMI in the Amazon EC2 User Guide. # Arguments - `name`: The name of the new AMI in the destination Region. @@ -2609,7 +2608,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys to an Outpost. The AMI must be in the Region of the destination Outpost. You cannot copy an AMI from an Outpost to a Region, from one Outpost to another, or within the same Outpost. 
For more information, see Copy AMIs from an Amazon Web Services Region to an Outpost in - the Amazon Elastic Compute Cloud User Guide. + the Amazon EC2 User Guide. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -2617,7 +2616,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys encrypted. You can encrypt a copy of an unencrypted snapshot, but you cannot create an unencrypted copy of an encrypted snapshot. The default KMS key for Amazon EBS is used unless you specify a non-default Key Management Service (KMS) KMS key using KmsKeyId. For - more information, see Amazon EBS encryption in the Amazon Elastic Compute Cloud User Guide. + more information, see Amazon EBS encryption in the Amazon EC2 User Guide. - `"kmsKeyId"`: The identifier of the symmetric Key Management Service (KMS) KMS key to use when creating encrypted volumes. If this parameter is not specified, your Amazon Web Services managed KMS key for Amazon EBS is used. If you specify a KMS key, you must also @@ -5346,8 +5345,8 @@ end Starts a task that restores an AMI from an Amazon S3 object that was previously created by using CreateStoreImageTask. To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the -Amazon Elastic Compute Cloud User Guide. For more information, see Store and restore an AMI -using Amazon S3 in the Amazon Elastic Compute Cloud User Guide. +Amazon EC2 User Guide. For more information, see Store and restore an AMI using Amazon S3 +in the Amazon EC2 User Guide. # Arguments - `bucket`: The name of the Amazon S3 bucket that contains the stored AMI object. @@ -5756,8 +5755,8 @@ end Stores an AMI as a single object in an Amazon S3 bucket. 
To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs -using Amazon S3 in the Amazon Elastic Compute Cloud User Guide. For more information, see -Store and restore an AMI using Amazon S3 in the Amazon Elastic Compute Cloud User Guide. +using Amazon S3 in the Amazon EC2 User Guide. For more information, see Store and restore +an AMI using Amazon S3 in the Amazon EC2 User Guide. # Arguments - `bucket`: The name of the Amazon S3 bucket in which the AMI object will be stored. The @@ -11043,13 +11042,12 @@ end Deregisters the specified AMI. After you deregister an AMI, it can't be used to launch new instances. If you deregister an AMI that matches a Recycle Bin retention rule, the AMI is retained in the Recycle Bin for the specified retention period. For more information, see -Recycle Bin in the Amazon Elastic Compute Cloud User Guide. When you deregister an AMI, it -doesn't affect any instances that you've already launched from the AMI. You'll continue to -incur usage costs for those instances until you terminate them. When you deregister an -Amazon EBS-backed AMI, it doesn't affect the snapshot that was created for the root volume -of the instance during the AMI creation process. When you deregister an instance -store-backed AMI, it doesn't affect the files that you uploaded to Amazon S3 when you -created the AMI. +Recycle Bin in the Amazon EC2 User Guide. When you deregister an AMI, it doesn't affect any +instances that you've already launched from the AMI. You'll continue to incur usage costs +for those instances until you terminate them. When you deregister an Amazon EBS-backed AMI, +it doesn't affect the snapshot that was created for the root volume of the instance during +the AMI creation process. When you deregister an instance store-backed AMI, it doesn't +affect the files that you uploaded to Amazon S3 when you created the AMI. # Arguments - `image_id`: The ID of the AMI. 
@@ -13009,6 +13007,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"IncludeDeprecated"`: Specifies whether to include deprecated AMIs. Default: No deprecated AMIs are included in the response. If you are the AMI owner, all deprecated AMIs appear in the response regardless of what you specify for this parameter. +- `"MaxResults"`: The maximum number of results to return with a single call. To retrieve + the remaining results, make another call with the returned nextToken value. +- `"NextToken"`: The token for the next page of results. - `"Owner"`: Scopes the results to images with the specified owners. You can specify a combination of Amazon Web Services account IDs, self, amazon, and aws-marketplace. If you omit this parameter, the results include all images for which you have launch permissions, @@ -16054,8 +16055,8 @@ Completed, or Failed. For tasks InProgress, the response shows the estimated pro percentage. Tasks are listed in reverse chronological order. Currently, only tasks from the past 31 days can be viewed. To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the -Amazon Elastic Compute Cloud User Guide. For more information, see Store and restore an AMI -using Amazon S3 in the Amazon Elastic Compute Cloud User Guide. +Amazon EC2 User Guide. For more information, see Store and restore an AMI using Amazon S3 +in the Amazon EC2 User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -18338,7 +18339,7 @@ end disable_image_deprecation(image_id, params::Dict{String,<:Any}) Cancels the deprecation of the specified AMI. For more information, see Deprecate an AMI in -the Amazon Elastic Compute Cloud User Guide. +the Amazon EC2 User Guide. # Arguments - `image_id`: The ID of the AMI. 
@@ -19394,7 +19395,7 @@ end enable_image_deprecation(deprecate_at, image_id, params::Dict{String,<:Any}) Enables deprecation of the specified AMI at the specified date and time. For more -information, see Deprecate an AMI in the Amazon Elastic Compute Cloud User Guide. +information, see Deprecate an AMI in the Amazon EC2 User Guide. # Arguments - `deprecate_at`: The date and time to deprecate the AMI, in UTC, in the following format: @@ -22251,7 +22252,7 @@ end list_images_in_recycle_bin(params::Dict{String,<:Any}) Lists one or more AMIs that are currently in the Recycle Bin. For more information, see -Recycle Bin in the Amazon Elastic Compute Cloud User Guide. +Recycle Bin in the Amazon EC2 User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -26453,7 +26454,7 @@ AMI with a billing product code, make sure that the Reserved Instance has the ma billing product code. If you purchase a Reserved Instance without the matching billing product code, the Reserved Instance will not be applied to the On-Demand Instance. For information about how to obtain the platform details and billing information of an AMI, see -Understand AMI billing information in the Amazon Elastic Compute Cloud User Guide. +Understand AMI billing information in the Amazon EC2 User Guide. # Arguments - `name`: A name for your AMI. Constraints: 3-128 alphanumeric characters, parentheses @@ -26463,16 +26464,19 @@ Understand AMI billing information in the Amazon Elastic Compute Cloud User Guid # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"BillingProduct"`: The billing product codes. Your account must be authorized to specify - billing product codes. Otherwise, you can use the Amazon Web Services Marketplace to bill - for the use of an AMI. + billing product codes. 
If your account is not authorized to specify billing product codes, + you can publish AMIs that include billable software and list them on the Amazon Web + Services Marketplace. You must first register as a seller on the Amazon Web Services + Marketplace. For more information, see Getting started as a seller and AMI-based products + in the Amazon Web Services Marketplace Seller Guide. - `"BlockDeviceMapping"`: The block device mapping entries. If you specify an Amazon EBS volume using the ID of an Amazon EBS snapshot, you can't specify the encryption state of the volume. If you create an AMI on an Outpost, then all backing snapshots must be on the same Outpost or in the Region of that Outpost. AMIs on an Outpost that include local snapshots can be used to launch instances on the same Outpost only. For more information, - Amazon EBS local snapshots on Outposts in the Amazon Elastic Compute Cloud User Guide. + Amazon EBS local snapshots on Outposts in the Amazon EC2 User Guide. - `"BootMode"`: The boot mode of the AMI. For more information, see Boot modes in the - Amazon Elastic Compute Cloud User Guide. + Amazon EC2 User Guide. - `"ImageLocation"`: The full path to your AMI manifest in Amazon S3 storage. The specified bucket must have the aws-exec-read canned access control list (ACL) to ensure that it can be accessed by Amazon EC2. For more information, see Canned ACLs in the Amazon S3 Service @@ -26481,14 +26485,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys launched from this AMI will have HttpTokens automatically set to required so that, by default, the instance requires that IMDSv2 is used when requesting instance metadata. In addition, HttpPutResponseHopLimit is set to 2. For more information, see Configure the AMI - in the Amazon Elastic Compute Cloud User Guide. If you set the value to v2.0, make sure - that your AMI software can support IMDSv2. + in the Amazon EC2 User Guide. 
If you set the value to v2.0, make sure that your AMI + software can support IMDSv2. - `"TpmSupport"`: Set to v2.0 to enable Trusted Platform Module (TPM) support. For more - information, see NitroTPM in the Amazon Elastic Compute Cloud User Guide. + information, see NitroTPM in the Amazon EC2 User Guide. - `"UefiData"`: Base64 representation of the non-volatile UEFI variable store. To retrieve the UEFI data, use the GetInstanceUefiData command. You can inspect and modify the UEFI data by using the python-uefivars tool on GitHub. For more information, see UEFI Secure - Boot in the Amazon Elastic Compute Cloud User Guide. + Boot in the Amazon EC2 User Guide. - `"architecture"`: The architecture of the AMI. Default: For Amazon EBS-backed AMIs, i386. For instance store-backed AMIs, the architecture specified in the manifest file. - `"description"`: A description for your AMI. @@ -27939,7 +27943,7 @@ end restore_image_from_recycle_bin(image_id, params::Dict{String,<:Any}) Restores an AMI from the Recycle Bin. For more information, see Recycle Bin in the Amazon -Elastic Compute Cloud User Guide. +EC2 User Guide. # Arguments - `image_id`: The ID of the AMI to restore. diff --git a/src/services/elasticache.jl b/src/services/elasticache.jl index 9e9b3acc73..40ec76c36b 100644 --- a/src/services/elasticache.jl +++ b/src/services/elasticache.jl @@ -487,10 +487,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys parameter is only valid if the Engine parameter is redis. - `"Tags"`: A list of tags to be added to this resource. - `"TransitEncryptionEnabled"`: A flag that enables in-transit encryption when set to true. - You cannot modify the value of TransitEncryptionEnabled after the cluster is created. To - enable in-transit encryption on a cluster you must set TransitEncryptionEnabled to true - when you create a cluster. Only available when creating a cache cluster in an Amazon VPC - using Memcached version 1.6.12 or later. 
+ Only available when creating a cache cluster in an Amazon VPC using Memcached version + 1.6.12 or later. """ function create_cache_cluster( CacheClusterId; aws_config::AbstractAWSConfig=global_aws_config() @@ -875,7 +873,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see Data tiering. - `"Engine"`: The name of the cache engine to be used for the clusters in this replication - group. Must be Redis. + group. The value must be set to Redis. - `"EngineVersion"`: The version number of the cache engine to be used for the clusters in this replication group. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation. Important: You can upgrade to a newer engine @@ -963,14 +961,20 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Key=myKey, Value=myKeyValue Key=mySecondKey, Value=mySecondKeyValue. Tags on replication groups will be replicated to all nodes. - `"TransitEncryptionEnabled"`: A flag that enables in-transit encryption when set to true. - You cannot modify the value of TransitEncryptionEnabled after the cluster is created. To - enable in-transit encryption on a cluster you must set TransitEncryptionEnabled to true - when you create a cluster. This parameter is valid only if the Engine parameter is redis, - the EngineVersion parameter is 3.2.6, 4.x or later, and the cluster is being created in an - Amazon VPC. If you enable in-transit encryption, you must also specify a value for - CacheSubnetGroup. Required: Only available when creating a replication group in an Amazon - VPC using redis version 3.2.6, 4.x or later. Default: false For HIPAA compliance, you - must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup. 
+ This parameter is valid only if the Engine parameter is redis, the EngineVersion parameter + is 3.2.6, 4.x or later, and the cluster is being created in an Amazon VPC. If you enable + in-transit encryption, you must also specify a value for CacheSubnetGroup. Required: Only + available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x + or later. Default: false For HIPAA compliance, you must specify TransitEncryptionEnabled + as true, an AuthToken, and a CacheSubnetGroup. +- `"TransitEncryptionMode"`: A setting that allows you to migrate your clients to use + in-transit encryption, with no downtime. When setting TransitEncryptionEnabled to true, you + can set your TransitEncryptionMode to preferred in the same request, to allow both + encrypted and unencrypted connections at the same time. Once you migrate all your Redis + clients to use encrypted connections you can modify the value to required to allow + encrypted connections only. Setting TransitEncryptionMode to required is a two-step process + that requires you to first set the TransitEncryptionMode to preferred first, after that you + can set TransitEncryptionMode to required. - `"UserGroupIds"`: The user group to associate with the replication group. """ function create_replication_group( @@ -1749,9 +1753,9 @@ Returns a list of the available cache engines and their versions. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"CacheParameterGroupFamily"`: The name of a specific cache parameter group family to return details for. 
Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 - | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | redis6.2 Constraints: Must be 1 - to 255 alphanumeric characters First character must be a letter Cannot end with a - hyphen or contain two consecutive hyphens + | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | redis6.2 | redis7 Constraints: + Must be 1 to 255 alphanumeric characters First character must be a letter Cannot end + with a hyphen or contain two consecutive hyphens - `"DefaultOnly"`: If true, specifies that only the default version of the specified engine or engine and major version combination is to be returned. - `"Engine"`: The cache engine to return. Valid values: memcached | redis @@ -1947,7 +1951,7 @@ Returns the default engine and system parameter information for the specified ca # Arguments - `cache_parameter_group_family`: The name of the cache parameter group family. Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | - redis4.0 | redis5.0 | redis6.x | redis6.2 + redis4.0 | redis5.0 | redis6.x | redis6.2 | redis7 # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3163,6 +3167,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"SnapshottingClusterId"`: The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis (cluster mode enabled) replication groups. +- `"TransitEncryptionEnabled"`: A flag that enables in-transit encryption when set to true. + If you are enabling in-transit encryption for an existing cluster, you must also set + TransitEncryptionMode to preferred. +- `"TransitEncryptionMode"`: A setting that allows you to migrate your clients to use + in-transit encryption, with no downtime. 
You must set TransitEncryptionEnabled to true, for + your existing cluster, and set TransitEncryptionMode to preferred in the same request to + allow both encrypted and unencrypted connections at the same time. Once you migrate all + your Redis clients to use encrypted connections you can set the value to required to allow + encrypted connections only. Setting TransitEncryptionMode to required is a two-step process + that requires you to first set the TransitEncryptionMode to preferred first, after that you + can set TransitEncryptionMode to required. - `"UserGroupIdsToAdd"`: The ID of the user group you are associating with the replication group. - `"UserGroupIdsToRemove"`: The ID of the user group to disassociate from the replication diff --git a/src/services/emr.jl b/src/services/emr.jl index 29abe9dcd2..3b5df07edb 100644 --- a/src/services/emr.jl +++ b/src/services/emr.jl @@ -100,16 +100,15 @@ each job flow. If your cluster is long-running (such as a Hive data warehouse) o you may require more than 256 steps to process your data. You can bypass the 256-step limitation in various ways, including using SSH to connect to the master node and submitting queries directly to the software running on the master node, such as Hive and -Hadoop. For more information on how to do this, see Add More than 256 Steps to a Cluster in -the Amazon EMR Management Guide. A step specifies the location of a JAR file stored either -on the master node of the cluster or in Amazon S3. Each step is performed by the main -function of the main class of the JAR file. The main class can be specified either in the -manifest of the JAR or by using the MainFunction parameter of the step. Amazon EMR executes -each step in the order listed. For a step to be considered complete, the main function must -exit with a zero exit code and all Hadoop jobs started while the step was running must have -completed and run successfully. 
You can only add steps to a cluster that is in one of the -following states: STARTING, BOOTSTRAPPING, RUNNING, or WAITING. The string values passed -into HadoopJarStep object cannot exceed a total of 10240 characters. +Hadoop. A step specifies the location of a JAR file stored either on the master node of the +cluster or in Amazon S3. Each step is performed by the main function of the main class of +the JAR file. The main class can be specified either in the manifest of the JAR or by using +the MainFunction parameter of the step. Amazon EMR executes each step in the order listed. +For a step to be considered complete, the main function must exit with a zero exit code and +all Hadoop jobs started while the step was running must have completed and run +successfully. You can only add steps to a cluster that is in one of the following states: +STARTING, BOOTSTRAPPING, RUNNING, or WAITING. The string values passed into HadoopJarStep +object cannot exceed a total of 10240 characters. # Arguments - `job_flow_id`: A string that uniquely identifies the job flow. This identifier is @@ -297,8 +296,8 @@ end Creates a new Amazon EMR Studio. # Arguments -- `auth_mode`: Specifies whether the Studio authenticates users using IAM or Amazon Web - Services SSO. +- `auth_mode`: Specifies whether the Studio authenticates users using IAM or IAM Identity + Center. - `default_s3_location`: The Amazon S3 location to back up Amazon EMR Studio Workspaces and notebook files. - `engine_security_group_id`: The ID of the Amazon EMR Studio Engine security group. The @@ -332,8 +331,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys key-value pairs that consist of a required key string with a maximum of 128 characters, and an optional value string with a maximum of 256 characters. - `"UserRole"`: The IAM user role that users and groups assume when logged in to an Amazon - EMR Studio. Only specify a UserRole when you use Amazon Web Services SSO authentication. 
- The permissions attached to the UserRole can be scoped down for each user or group using + EMR Studio. Only specify a UserRole when you use IAM Identity Center authentication. The + permissions attached to the UserRole can be scoped down for each user or group using session policies. """ function create_studio( @@ -404,7 +403,7 @@ end Maps a user or group to the Amazon EMR Studio specified by StudioId, and applies a session policy to refine Studio permissions for that user or group. Use CreateStudioSessionMapping -to assign users to a Studio when you use Amazon Web Services SSO authentication. For +to assign users to a Studio when you use IAM Identity Center authentication. For instructions on how to assign users to a Studio when you use IAM authentication, see Assign a user or group to your EMR Studio. @@ -419,13 +418,13 @@ a user or group to your EMR Studio. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"IdentityId"`: The globally unique identifier (GUID) of the user or group from the - Amazon Web Services SSO Identity Store. For more information, see UserId and GroupId in the - Amazon Web Services SSO Identity Store API Reference. Either IdentityName or IdentityId - must be specified, but not both. +- `"IdentityId"`: The globally unique identifier (GUID) of the user or group from the IAM + Identity Center Identity Store. For more information, see UserId and GroupId in the IAM + Identity Center Identity Store API Reference. Either IdentityName or IdentityId must be + specified, but not both. - `"IdentityName"`: The name of the user or group. For more information, see UserName and - DisplayName in the Amazon Web Services SSO Identity Store API Reference. Either - IdentityName or IdentityId must be specified, but not both. + DisplayName in the IAM Identity Center Identity Store API Reference. Either IdentityName or + IdentityId must be specified, but not both. 
""" function create_studio_session_mapping( IdentityType, @@ -547,12 +546,11 @@ Removes a user or group from an Amazon EMR Studio. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"IdentityId"`: The globally unique identifier (GUID) of the user or group to remove from - the Amazon EMR Studio. For more information, see UserId and GroupId in the Amazon Web - Services SSO Identity Store API Reference. Either IdentityName or IdentityId must be - specified. + the Amazon EMR Studio. For more information, see UserId and GroupId in the IAM Identity + Center Identity Store API Reference. Either IdentityName or IdentityId must be specified. - `"IdentityName"`: The name of the user name or group to remove from the Amazon EMR - Studio. For more information, see UserName and DisplayName in the Amazon Web Services SSO - Store API Reference. Either IdentityName or IdentityId must be specified. + Studio. For more information, see UserName and DisplayName in the IAM Identity Center Store + API Reference. Either IdentityName or IdentityId must be specified. """ function delete_studio_session_mapping( IdentityType, StudioId; aws_config::AbstractAWSConfig=global_aws_config() @@ -889,6 +887,55 @@ function get_block_public_access_configuration( ) end +""" + get_cluster_session_credentials(cluster_id, execution_role_arn) + get_cluster_session_credentials(cluster_id, execution_role_arn, params::Dict{String,<:Any}) + +Provides Temporary, basic HTTP credentials that are associated with a given runtime IAM +role and used by a cluster with fine-grained access control activated. You can use these +credentials to connect to cluster endpoints that support username-based and password-based +authentication. + +# Arguments +- `cluster_id`: The unique identifier of the cluster. +- `execution_role_arn`: The Amazon Resource Name (ARN) of the runtime role for interactive + workload submission on the cluster. 
The runtime role can be a cross-account IAM role. The + runtime role ARN is a combination of account ID, role name, and role type using the + following format: arn:partition:service:region:account:resource. + +""" +function get_cluster_session_credentials( + ClusterId, ExecutionRoleArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return emr( + "GetClusterSessionCredentials", + Dict{String,Any}("ClusterId" => ClusterId, "ExecutionRoleArn" => ExecutionRoleArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_cluster_session_credentials( + ClusterId, + ExecutionRoleArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return emr( + "GetClusterSessionCredentials", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ClusterId" => ClusterId, "ExecutionRoleArn" => ExecutionRoleArn + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_managed_scaling_policy(cluster_id) get_managed_scaling_policy(cluster_id, params::Dict{String,<:Any}) @@ -938,11 +985,11 @@ Fetches mapping details for the specified Amazon EMR Studio and identity (user o # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"IdentityId"`: The globally unique identifier (GUID) of the user or group. For more - information, see UserId and GroupId in the Amazon Web Services SSO Identity Store API + information, see UserId and GroupId in the IAM Identity Center Identity Store API Reference. Either IdentityName or IdentityId must be specified. - `"IdentityName"`: The name of the user or group to fetch. For more information, see - UserName and DisplayName in the Amazon Web Services SSO Identity Store API Reference. - Either IdentityName or IdentityId must be specified. + UserName and DisplayName in the IAM Identity Center Identity Store API Reference. Either + IdentityName or IdentityId must be specified. 
""" function get_studio_session_mapping( IdentityType, StudioId; aws_config::AbstractAWSConfig=global_aws_config() @@ -1864,12 +1911,10 @@ of 256 steps are allowed in each job flow. If your cluster is long-running (such data warehouse) or complex, you may require more than 256 steps to process your data. You can bypass the 256-step limitation in various ways, including using the SSH shell to connect to the master node and submitting queries directly to the software running on the -master node, such as Hive and Hadoop. For more information on how to do this, see Add More -than 256 Steps to a Cluster in the Amazon EMR Management Guide. For long running clusters, -we recommend that you periodically store your results. The instance fleets configuration -is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. The -RunJobFlow request can contain InstanceFleets parameters or InstanceGroups parameters, but -not both. +master node, such as Hive and Hadoop. For long-running clusters, we recommend that you +periodically store your results. The instance fleets configuration is available only in +Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. The RunJobFlow request can +contain InstanceFleets parameters or InstanceGroups parameters, but not both. # Arguments - `instances`: A specification of the number and type of Amazon EC2 instances. @@ -1958,7 +2003,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys and later, and is the default for versions of Amazon EMR earlier than 5.1.0. - `"SecurityConfiguration"`: The name of a security configuration to apply to the cluster. - `"ServiceRole"`: The IAM role that Amazon EMR assumes in order to access Amazon Web - Services resources on your behalf. + Services resources on your behalf. If you've created a custom service role path, you must + specify it for the service role when you launch your cluster. 
- `"StepConcurrencyLevel"`: Specifies the number of steps that can be executed concurrently. The default value is 1. The maximum value is 256. - `"Steps"`: A list of steps to run. @@ -2337,11 +2383,11 @@ Studio. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"IdentityId"`: The globally unique identifier (GUID) of the user or group. For more - information, see UserId and GroupId in the Amazon Web Services SSO Identity Store API + information, see UserId and GroupId in the IAM Identity Center Identity Store API Reference. Either IdentityName or IdentityId must be specified. - `"IdentityName"`: The name of the user or group to update. For more information, see - UserName and DisplayName in the Amazon Web Services SSO Identity Store API Reference. - Either IdentityName or IdentityId must be specified. + UserName and DisplayName in the IAM Identity Center Identity Store API Reference. Either + IdentityName or IdentityId must be specified. """ function update_studio_session_mapping( IdentityType, diff --git a/src/services/emr_serverless.jl b/src/services/emr_serverless.jl index 5636a4e30c..f7334df5b0 100644 --- a/src/services/emr_serverless.jl +++ b/src/services/emr_serverless.jl @@ -49,7 +49,7 @@ Creates an application. # Arguments - `client_token`: The client idempotency token of the application to create. Its value must be unique for each request. -- `release_label`: The EMR release version associated with the application. +- `release_label`: The EMR release associated with the application. - `type`: The type of application you want to start, such as Spark or Hive. # Optional Parameters @@ -59,6 +59,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys on job submission. - `"autoStopConfiguration"`: The configuration for an application to automatically stop after a certain amount of time being idle. +- `"imageConfiguration"`: The image configuration for all worker types. 
You can either set + this parameter or imageConfiguration for each worker type in workerTypeSpecifications. - `"initialCapacity"`: The capacity to initialize when the application is created. - `"maximumCapacity"`: The maximum capacity to allocate when the application is created. This is cumulative across all workers at any given point in time, not just when an @@ -67,6 +69,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"name"`: The name of the application. - `"networkConfiguration"`: The network configuration for customer VPC connectivity. - `"tags"`: The tags assigned to the application. +- `"workerTypeSpecifications"`: The key-value pairs that specify worker type to + WorkerTypeSpecificationInput. This parameter must contain all valid worker types for a + Spark or Hive application. Valid worker types include Driver and Executor for Spark + applications and HiveDriver and TezTask for Hive applications. You can either set image + details in this parameter for each worker type, or in imageConfiguration for all worker + types. """ function create_application( clientToken, releaseLabel, type; aws_config::AbstractAWSConfig=global_aws_config() @@ -577,11 +585,20 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys on job submission. - `"autoStopConfiguration"`: The configuration for an application to automatically stop after a certain amount of time being idle. +- `"imageConfiguration"`: The image configuration to be used for all worker types. You can + either set this parameter or imageConfiguration for each worker type in + WorkerTypeSpecificationInput. - `"initialCapacity"`: The capacity to initialize when the application is updated. - `"maximumCapacity"`: The maximum capacity to allocate when the application is updated. This is cumulative across all workers at any given point in time during the lifespan of the application. 
No new resources will be created once any one of the defined limits is hit. - `"networkConfiguration"`: +- `"workerTypeSpecifications"`: The key-value pairs that specify worker type to + WorkerTypeSpecificationInput. This parameter must contain all valid worker types for a + Spark or Hive application. Valid worker types include Driver and Executor for Spark + applications and HiveDriver and TezTask for Hive applications. You can either set image + details in this parameter for each worker type, or in imageConfiguration for all worker + types. """ function update_application( applicationId, clientToken; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/iotdeviceadvisor.jl b/src/services/iotdeviceadvisor.jl index 6a4ae54de6..72164ff562 100644 --- a/src/services/iotdeviceadvisor.jl +++ b/src/services/iotdeviceadvisor.jl @@ -5,30 +5,48 @@ using AWS.Compat using AWS.UUIDs """ - create_suite_definition() - create_suite_definition(params::Dict{String,<:Any}) + create_suite_definition(suite_definition_configuration) + create_suite_definition(suite_definition_configuration, params::Dict{String,<:Any}) Creates a Device Advisor test suite. Requires permission to access the CreateSuiteDefinition action. +# Arguments +- `suite_definition_configuration`: Creates a Device Advisor test suite with suite + definition configuration. + # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"suiteDefinitionConfiguration"`: Creates a Device Advisor test suite with suite - definition configuration. - `"tags"`: The tags to be attached to the suite definition. 
""" -function create_suite_definition(; aws_config::AbstractAWSConfig=global_aws_config()) +function create_suite_definition( + suiteDefinitionConfiguration; aws_config::AbstractAWSConfig=global_aws_config() +) return iotdeviceadvisor( - "POST", "/suiteDefinitions"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + "POST", + "/suiteDefinitions", + Dict{String,Any}("suiteDefinitionConfiguration" => suiteDefinitionConfiguration); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, ) end function create_suite_definition( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() + suiteDefinitionConfiguration, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), ) return iotdeviceadvisor( "POST", "/suiteDefinitions", - params; + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "suiteDefinitionConfiguration" => suiteDefinitionConfiguration + ), + params, + ), + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -272,7 +290,8 @@ Lists the tags attached to an IoT Device Advisor resource. Requires permission t the ListTagsForResource action. # Arguments -- `resource_arn`: The ARN of the IoT Device Advisor resource. +- `resource_arn`: The resource ARN of the IoT Device Advisor resource. This can be + SuiteDefinition ARN or SuiteRun ARN. """ function list_tags_for_resource( @@ -300,40 +319,50 @@ function list_tags_for_resource( end """ - start_suite_run(suite_definition_id) - start_suite_run(suite_definition_id, params::Dict{String,<:Any}) + start_suite_run(suite_definition_id, suite_run_configuration) + start_suite_run(suite_definition_id, suite_run_configuration, params::Dict{String,<:Any}) Starts a Device Advisor test suite run. Requires permission to access the StartSuiteRun action. # Arguments - `suite_definition_id`: Suite definition ID of the test suite. +- `suite_run_configuration`: Suite run configuration. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"suiteDefinitionVersion"`: Suite definition version of the test suite. -- `"suiteRunConfiguration"`: Suite run configuration. - `"tags"`: The tags to be attached to the suite run. """ function start_suite_run( - suiteDefinitionId; aws_config::AbstractAWSConfig=global_aws_config() + suiteDefinitionId, + suiteRunConfiguration; + aws_config::AbstractAWSConfig=global_aws_config(), ) return iotdeviceadvisor( "POST", - "/suiteDefinitions/$(suiteDefinitionId)/suiteRuns"; + "/suiteDefinitions/$(suiteDefinitionId)/suiteRuns", + Dict{String,Any}("suiteRunConfiguration" => suiteRunConfiguration); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function start_suite_run( suiteDefinitionId, + suiteRunConfiguration, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return iotdeviceadvisor( "POST", "/suiteDefinitions/$(suiteDefinitionId)/suiteRuns", - params; + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("suiteRunConfiguration" => suiteRunConfiguration), + params, + ), + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -384,7 +413,8 @@ Adds to and modifies existing tags of an IoT Device Advisor resource. Requires p to access the TagResource action. # Arguments -- `resource_arn`: The resource ARN of an IoT Device Advisor resource. +- `resource_arn`: The resource ARN of an IoT Device Advisor resource. This can be + SuiteDefinition ARN or SuiteRun ARN. - `tags`: The tags to be attached to the IoT Device Advisor resource. """ @@ -420,7 +450,8 @@ Removes tags from an IoT Device Advisor resource. Requires permission to access UntagResource action. # Arguments -- `resource_arn`: The resource ARN of an IoT Device Advisor resource. +- `resource_arn`: The resource ARN of an IoT Device Advisor resource. This can be + SuiteDefinition ARN or SuiteRun ARN. 
- `tag_keys`: List of tag keys to remove from the IoT Device Advisor resource. """ @@ -451,31 +482,33 @@ function untag_resource( end """ - update_suite_definition(suite_definition_id) - update_suite_definition(suite_definition_id, params::Dict{String,<:Any}) + update_suite_definition(suite_definition_configuration, suite_definition_id) + update_suite_definition(suite_definition_configuration, suite_definition_id, params::Dict{String,<:Any}) Updates a Device Advisor test suite. Requires permission to access the UpdateSuiteDefinition action. # Arguments +- `suite_definition_configuration`: Updates a Device Advisor test suite with suite + definition configuration. - `suite_definition_id`: Suite definition ID of the test suite to be updated. -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"suiteDefinitionConfiguration"`: Updates a Device Advisor test suite with suite - definition configuration. """ function update_suite_definition( - suiteDefinitionId; aws_config::AbstractAWSConfig=global_aws_config() + suiteDefinitionConfiguration, + suiteDefinitionId; + aws_config::AbstractAWSConfig=global_aws_config(), ) return iotdeviceadvisor( "PATCH", - "/suiteDefinitions/$(suiteDefinitionId)"; + "/suiteDefinitions/$(suiteDefinitionId)", + Dict{String,Any}("suiteDefinitionConfiguration" => suiteDefinitionConfiguration); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function update_suite_definition( + suiteDefinitionConfiguration, suiteDefinitionId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -483,7 +516,15 @@ function update_suite_definition( return iotdeviceadvisor( "PATCH", "/suiteDefinitions/$(suiteDefinitionId)", - params; + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "suiteDefinitionConfiguration" => suiteDefinitionConfiguration + ), + params, + ), + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) diff --git 
a/src/services/iotfleetwise.jl b/src/services/iotfleetwise.jl index b08de624f4..42b11835ba 100644 --- a/src/services/iotfleetwise.jl +++ b/src/services/iotfleetwise.jl @@ -152,7 +152,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys SEND_ACTIVE_DTCS. If it's not specified, OFF is used. Default: OFF - `"expiryTime"`: (Optional) The time the campaign expires, in seconds since epoch (January 1, 1970 at midnight UTC time). Vehicle data won't be collected after the campaign - expires. Default: 253402243200 (December 31, 9999, 00:00:00 UTC) + expires. Default: 253402214400 (December 31, 9999, 00:00:00 UTC) - `"postTriggerCollectionDuration"`: (Optional) How long (in milliseconds) to collect raw data after a triggering event initiates the collection. If it's not specified, 0 is used. Default: 0 diff --git a/src/services/kinesis_video.jl b/src/services/kinesis_video.jl index d8e44744d3..90cbc49462 100644 --- a/src/services/kinesis_video.jl +++ b/src/services/kinesis_video.jl @@ -271,6 +271,77 @@ function describe_image_generation_configuration( ) end +""" + describe_mapped_resource_configuration() + describe_mapped_resource_configuration(params::Dict{String,<:Any}) + +Returns the most current information about the stream. Either streamName or streamARN +should be provided in the input. Returns the most current information about the stream. The +streamName or streamARN should be provided in the input. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return in the response. +- `"NextToken"`: The token to provide in your next request, to get another batch of results. +- `"StreamARN"`: The Amazon Resource Name (ARN) of the stream. +- `"StreamName"`: The name of the stream. 
+""" +function describe_mapped_resource_configuration(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return kinesis_video( + "POST", + "/describeMappedResourceConfiguration"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_mapped_resource_configuration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return kinesis_video( + "POST", + "/describeMappedResourceConfiguration", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_media_storage_configuration() + describe_media_storage_configuration(params::Dict{String,<:Any}) + +Returns the most current information about the channel. Specify the ChannelName or +ChannelARN in the input. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ChannelARN"`: The Amazon Resource Name (ARN) of the channel. +- `"ChannelName"`: The name of the channel. +""" +function describe_media_storage_configuration(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return kinesis_video( + "POST", + "/describeMediaStorageConfiguration"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_media_storage_configuration( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return kinesis_video( + "POST", + "/describeMediaStorageConfiguration", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_notification_configuration() describe_notification_configuration(params::Dict{String,<:Any}) @@ -612,16 +683,16 @@ end start_edge_configuration_update(edge_config) start_edge_configuration_update(edge_config, params::Dict{String,<:Any}) -An asynchronous API that updates a stream’s existing edge configuration. 
If this API is -invoked for the first time, a new edge configuration will be created for the stream, and -the sync status will be set to SYNCING. The Kinesis Video Stream will sync the stream’s -edge configuration with the Edge Agent IoT Greengrass component that runs on an IoT Hub -Device setup at your premise. The time to sync can vary and depends on the connectivity of -the Hub Device. The SyncStatus will be updated as the edge configuration is acknowledged, -and synced with the Edge Agent. You will have to wait for the sync status to reach a -terminal state such as: IN_SYNC and SYNC_FAILED, before using this API again. If you invoke +An asynchronous API that updates a stream’s existing edge configuration. The Kinesis +Video Stream will sync the stream’s edge configuration with the Edge Agent IoT Greengrass +component that runs on an IoT Hub Device, setup at your premise. The time to sync can vary +and depends on the connectivity of the Hub Device. The SyncStatus will be updated as the +edge configuration is acknowledged, and synced with the Edge Agent. If this API is invoked +for the first time, a new edge configuration will be created for the stream, and the sync +status will be set to SYNCING. You will have to wait for the sync status to reach a +terminal state such as: IN_SYNC, or SYNC_FAILED, before using this API again. If you invoke this API during the syncing process, a ResourceInUseException will be thrown. The -connectivity of the stream's edge configuration and the Edge Agent will be retried for 15 +connectivity of the stream’s edge configuration and the Edge Agent will be retried for 15 minutes. After 15 minutes, the status will transition into the SYNC_FAILED state. 
# Arguments @@ -952,6 +1023,59 @@ function update_image_generation_configuration( ) end +""" + update_media_storage_configuration(channel_arn, media_storage_configuration) + update_media_storage_configuration(channel_arn, media_storage_configuration, params::Dict{String,<:Any}) + +Associates a SignalingChannel to a stream to store the media. There are two signaling modes +that can specified : If the StorageStatus is disabled, no data will be stored, and the +StreamARN parameter will not be needed. If the StorageStatus is enabled, the data will +be stored in the StreamARN provided. + +# Arguments +- `channel_arn`: The Amazon Resource Name (ARN) of the channel. +- `media_storage_configuration`: A structure that encapsulates, or contains, the media + storage configuration properties. + +""" +function update_media_storage_configuration( + ChannelARN, MediaStorageConfiguration; aws_config::AbstractAWSConfig=global_aws_config() +) + return kinesis_video( + "POST", + "/updateMediaStorageConfiguration", + Dict{String,Any}( + "ChannelARN" => ChannelARN, + "MediaStorageConfiguration" => MediaStorageConfiguration, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_media_storage_configuration( + ChannelARN, + MediaStorageConfiguration, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kinesis_video( + "POST", + "/updateMediaStorageConfiguration", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ChannelARN" => ChannelARN, + "MediaStorageConfiguration" => MediaStorageConfiguration, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_notification_configuration() update_notification_configuration(params::Dict{String,<:Any}) diff --git a/src/services/kinesis_video_webrtc_storage.jl b/src/services/kinesis_video_webrtc_storage.jl new file mode 100644 index 0000000000..c9ab0c4b37 --- /dev/null +++ 
b/src/services/kinesis_video_webrtc_storage.jl @@ -0,0 +1,52 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: kinesis_video_webrtc_storage +using AWS.Compat +using AWS.UUIDs + +""" + join_storage_session(channel_arn) + join_storage_session(channel_arn, params::Dict{String,<:Any}) + + Join the ongoing one way-video and/or multi-way audio WebRTC session as a video producing +device for an input channel. If there’s no existing session for the channel, a new +streaming session needs to be created, and the Amazon Resource Name (ARN) of the signaling +channel must be provided. Currently for the SINGLE_MASTER type, a video producing device +is able to ingest both audio and video media into a stream, while viewers can only ingest +audio. Both a video producing device and viewers can join the session first, and wait for +other participants. While participants are having peer to peer conversations through +webRTC, the ingested media session will be stored into the Kinesis Video Stream. Multiple +viewers are able to playback real-time media. Customers can also use existing Kinesis Video +Streams features like HLS or DASH playback, Image generation, and more with ingested WebRTC +media. Assume that only one video producing device client can be associated with a session +for the channel. If more than one client joins the session of a specific channel as a video +producing device, the most recent client request takes precedence. + +# Arguments +- `channel_arn`: The Amazon Resource Name (ARN) of the signaling channel. 
+ +""" +function join_storage_session(channelArn; aws_config::AbstractAWSConfig=global_aws_config()) + return kinesis_video_webrtc_storage( + "POST", + "/joinStorageSession", + Dict{String,Any}("channelArn" => channelArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function join_storage_session( + channelArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kinesis_video_webrtc_storage( + "POST", + "/joinStorageSession", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("channelArn" => channelArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/license_manager_linux_subscriptions.jl b/src/services/license_manager_linux_subscriptions.jl new file mode 100644 index 0000000000..f9c028f39e --- /dev/null +++ b/src/services/license_manager_linux_subscriptions.jl @@ -0,0 +1,172 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: license_manager_linux_subscriptions +using AWS.Compat +using AWS.UUIDs + +""" + get_service_settings() + get_service_settings(params::Dict{String,<:Any}) + +Lists the Linux subscriptions service settings. + +""" +function get_service_settings(; aws_config::AbstractAWSConfig=global_aws_config()) + return license_manager_linux_subscriptions( + "POST", + "/subscription/GetServiceSettings"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_service_settings( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/GetServiceSettings", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_linux_subscription_instances() + list_linux_subscription_instances(params::Dict{String,<:Any}) + +Lists the running Amazon EC2 instances that were discovered with commercial Linux +subscriptions. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: An array of structures that you can use to filter the results to those that + match one or more sets of key-value pairs that you specify. For example, you can filter by + the name of AmiID with an optional operator to see subscriptions that match, partially + match, or don't match a certain Amazon Machine Image (AMI) ID. The valid names for this + filter are: AmiID InstanceID AccountID Status Region UsageOperation + ProductCode InstanceType The valid Operators for this filter are: contains + equals Notequal +- `"MaxResults"`: Maximum number of results to return in a single call. +- `"NextToken"`: Token for the next set of results. +""" +function list_linux_subscription_instances(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/ListLinuxSubscriptionInstances"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_linux_subscription_instances( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/ListLinuxSubscriptionInstances", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_linux_subscriptions() + list_linux_subscriptions(params::Dict{String,<:Any}) + +Lists the Linux subscriptions that have been discovered. If you have linked your +organization, the returned results will include data aggregated across your accounts in +Organizations. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: An array of structures that you can use to filter the results to those that + match one or more sets of key-value pairs that you specify. 
For example, you can filter by + the name of Subscription with an optional operator to see subscriptions that match, + partially match, or don't match a certain subscription's name. The valid names for this + filter are: Subscription The valid Operators for this filter are: contains + equals Notequal +- `"MaxResults"`: Maximum number of results to return in a single call. +- `"NextToken"`: Token for the next set of results. +""" +function list_linux_subscriptions(; aws_config::AbstractAWSConfig=global_aws_config()) + return license_manager_linux_subscriptions( + "POST", + "/subscription/ListLinuxSubscriptions"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_linux_subscriptions( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/ListLinuxSubscriptions", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_service_settings(linux_subscriptions_discovery, linux_subscriptions_discovery_settings) + update_service_settings(linux_subscriptions_discovery, linux_subscriptions_discovery_settings, params::Dict{String,<:Any}) + +Updates the service settings for Linux subscriptions. + +# Arguments +- `linux_subscriptions_discovery`: Describes if the discovery of Linux subscriptions is + enabled. +- `linux_subscriptions_discovery_settings`: The settings defined for Linux subscriptions + discovery. The settings include if Organizations integration has been enabled, and which + Regions data will be aggregated from. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AllowUpdate"`: Describes if updates are allowed to the service settings for Linux + subscriptions. If you allow updates, you can aggregate Linux subscription data in more than + one home Region. 
+""" +function update_service_settings( + LinuxSubscriptionsDiscovery, + LinuxSubscriptionsDiscoverySettings; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/UpdateServiceSettings", + Dict{String,Any}( + "LinuxSubscriptionsDiscovery" => LinuxSubscriptionsDiscovery, + "LinuxSubscriptionsDiscoverySettings" => LinuxSubscriptionsDiscoverySettings, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_service_settings( + LinuxSubscriptionsDiscovery, + LinuxSubscriptionsDiscoverySettings, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/UpdateServiceSettings", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "LinuxSubscriptionsDiscovery" => LinuxSubscriptionsDiscovery, + "LinuxSubscriptionsDiscoverySettings" => + LinuxSubscriptionsDiscoverySettings, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/lightsail.jl b/src/services/lightsail.jl index 20dfbd203b..24ec335547 100644 --- a/src/services/lightsail.jl +++ b/src/services/lightsail.jl @@ -1163,10 +1163,7 @@ operation supports tag-based access control via request tags. For more informati Amazon Lightsail Developer Guide. # Arguments -- `domain_name`: The domain name to manage (e.g., example.com). You cannot register a new - domain name using Lightsail. You must register a domain name using Amazon Route 53 or - another domain name registrar. If you have already registered your domain, you can enter - its name in this parameter to manage the DNS records for that domain using Lightsail. +- `domain_name`: The domain name to manage (e.g., example.com). # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2268,8 +2265,8 @@ Lightsail Developer Guide. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"forceDeleteAddOns"`: A Boolean value to indicate whether to delete the enabled add-ons - for the disk. +- `"forceDeleteAddOns"`: A Boolean value to indicate whether to delete all add-ons for the + disk. """ function delete_disk(diskName; aws_config::AbstractAWSConfig=global_aws_config()) return lightsail( @@ -2455,8 +2452,8 @@ more information, see the Amazon Lightsail Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"forceDeleteAddOns"`: A Boolean value to indicate whether to delete the enabled add-ons - for the disk. +- `"forceDeleteAddOns"`: A Boolean value to indicate whether to delete all add-ons for the + instance. """ function delete_instance(instanceName; aws_config::AbstractAWSConfig=global_aws_config()) return lightsail( diff --git a/src/services/lookoutequipment.jl b/src/services/lookoutequipment.jl index 89fbb3e3c6..9990e5d1fe 100644 --- a/src/services/lookoutequipment.jl +++ b/src/services/lookoutequipment.jl @@ -967,6 +967,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ModelName"`: The name of the ML model used by the inference scheduler to be listed. - `"NextToken"`: An opaque pagination token indicating where to continue the listing of inference schedulers. +- `"Status"`: Specifies the current status of the inference schedulers to list. 
""" function list_inference_schedulers(; aws_config::AbstractAWSConfig=global_aws_config()) return lookoutequipment( diff --git a/src/services/memorydb.jl b/src/services/memorydb.jl index a47382ae15..a2874c1158 100644 --- a/src/services/memorydb.jl +++ b/src/services/memorydb.jl @@ -872,6 +872,94 @@ function describe_parameters( ) end +""" + describe_reserved_nodes() + describe_reserved_nodes(params::Dict{String,<:Any}) + +Returns information about reserved nodes for this account, or about a specified reserved +node. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Duration"`: The duration filter value, specified in years or seconds. Use this + parameter to show only reservations for this duration. +- `"MaxResults"`: The maximum number of records to include in the response. If more records + exist than the specified MaxRecords value, a marker is included in the response so that the + remaining results can be retrieved. +- `"NextToken"`: An optional marker returned from a prior request. Use this marker for + pagination of results from this operation. If this parameter is specified, the response + includes only records beyond the marker, up to the value specified by MaxRecords. +- `"NodeType"`: The node type filter value. Use this parameter to show only those + reservations matching the specified node type. For more information, see Supported node + types. +- `"OfferingType"`: The offering type filter value. Use this parameter to show only the + available offerings matching the specified offering type. Valid values: \"All + Upfront\"|\"Partial Upfront\"| \"No Upfront\" +- `"ReservationId"`: The reserved node identifier filter value. Use this parameter to show + only the reservation that matches the specified reservation ID. +- `"ReservedNodesOfferingId"`: The offering identifier filter value. Use this parameter to + show only purchased reservations matching the specified offering identifier. 
+""" +function describe_reserved_nodes(; aws_config::AbstractAWSConfig=global_aws_config()) + return memorydb( + "DescribeReservedNodes"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_reserved_nodes( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return memorydb( + "DescribeReservedNodes", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_reserved_nodes_offerings() + describe_reserved_nodes_offerings(params::Dict{String,<:Any}) + +Lists available reserved node offerings. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Duration"`: Duration filter value, specified in years or seconds. Use this parameter to + show only reservations for a given duration. +- `"MaxResults"`: The maximum number of records to include in the response. If more records + exist than the specified MaxRecords value, a marker is included in the response so that the + remaining results can be retrieved. +- `"NextToken"`: An optional marker returned from a prior request. Use this marker for + pagination of results from this operation. If this parameter is specified, the response + includes only records beyond the marker, up to the value specified by MaxRecords. +- `"NodeType"`: The node type for the reserved nodes. For more information, see Supported + node types. +- `"OfferingType"`: The offering type filter value. Use this parameter to show only the + available offerings matching the specified offering type. Valid values: \"All + Upfront\"|\"Partial Upfront\"| \"No Upfront\" +- `"ReservedNodesOfferingId"`: The offering identifier filter value. Use this parameter to + show only the available offering that matches the specified reservation identifier. 
+""" +function describe_reserved_nodes_offerings(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return memorydb( + "DescribeReservedNodesOfferings"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_reserved_nodes_offerings( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return memorydb( + "DescribeReservedNodesOfferings", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_service_updates() describe_service_updates(params::Dict{String,<:Any}) @@ -1129,6 +1217,52 @@ function list_tags( ) end +""" + purchase_reserved_nodes_offering(reserved_nodes_offering_id) + purchase_reserved_nodes_offering(reserved_nodes_offering_id, params::Dict{String,<:Any}) + +Allows you to purchase a reserved node offering. Reserved nodes are not eligible for +cancellation and are non-refundable. + +# Arguments +- `reserved_nodes_offering_id`: The ID of the reserved node offering to purchase. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"NodeCount"`: The number of node instances to reserve. +- `"ReservationId"`: A customer-specified identifier to track this reservation. +- `"Tags"`: A list of tags to be added to this resource. A tag is a key-value pair. A tag + key must be accompanied by a tag value, although null is accepted. 
+""" +function purchase_reserved_nodes_offering( + ReservedNodesOfferingId; aws_config::AbstractAWSConfig=global_aws_config() +) + return memorydb( + "PurchaseReservedNodesOffering", + Dict{String,Any}("ReservedNodesOfferingId" => ReservedNodesOfferingId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function purchase_reserved_nodes_offering( + ReservedNodesOfferingId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return memorydb( + "PurchaseReservedNodesOffering", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ReservedNodesOfferingId" => ReservedNodesOfferingId), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ reset_parameter_group(parameter_group_name) reset_parameter_group(parameter_group_name, params::Dict{String,<:Any}) diff --git a/src/services/mwaa.jl b/src/services/mwaa.jl index 170a2283a8..f207839a8e 100644 --- a/src/services/mwaa.jl +++ b/src/services/mwaa.jl @@ -61,9 +61,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys configuration options you want to attach to your environment. To learn more, see Apache Airflow configuration options. - `"AirflowVersion"`: The Apache Airflow version for your environment. If no value is - specified, it defaults to the latest version. Valid values: 1.10.12, 2.0.2, and 2.2.2. To - learn more, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow - (MWAA). + specified, it defaults to the latest version. Valid values: 1.10.12, 2.0.2, 2.2.2, and + 2.4.3. For more information, see Apache Airflow versions on Amazon Managed Workflows for + Apache Airflow (MWAA). - `"EnvironmentClass"`: The environment class type. Valid values: mw1.small, mw1.medium, mw1.large. To learn more, see Amazon MWAA environment class. 
- `"KmsKey"`: The Amazon Web Services Key Management Service (KMS) key to encrypt the data @@ -440,7 +440,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys configuration options you want to attach to your environment. To learn more, see Apache Airflow configuration options. - `"AirflowVersion"`: The Apache Airflow version for your environment. If no value is - specified, defaults to the latest version. Valid values: 1.10.12, 2.0.2, and 2.2.2. + specified, defaults to the latest version. Valid values: 1.10.12, 2.0.2, 2.2.2, and 2.4.3. - `"DagS3Path"`: The relative path to the DAGs folder on your Amazon S3 bucket. For example, dags. To learn more, see Adding or updating DAGs. - `"EnvironmentClass"`: The environment class type. Valid values: mw1.small, mw1.medium, diff --git a/src/services/nimble.jl b/src/services/nimble.jl index 4315d5dfc9..41b6097906 100644 --- a/src/services/nimble.jl +++ b/src/services/nimble.jl @@ -16,8 +16,9 @@ Accept EULAs. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If you don’t specify a client token, the AWS SDK - automatically generates a client token and uses it for the request to ensure idempotency. + idempotency of the request. If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. - `"eulaIds"`: The EULA ID. """ function accept_eulas(studioId; aws_config::AbstractAWSConfig=global_aws_config()) @@ -67,10 +68,11 @@ Create a launch profile. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. 
If you don’t specify a client token, the AWS SDK - automatically generates a client token and uses it for the request to ensure idempotency. + idempotency of the request. If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. - `"description"`: The description. -- `"tags"`: A collection of labels, in the form of key:value pairs, that apply to this +- `"tags"`: A collection of labels, in the form of key-value pairs, that apply to this resource. """ function create_launch_profile( @@ -143,10 +145,11 @@ Creates a streaming image resource in a studio. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If you don’t specify a client token, the AWS SDK - automatically generates a client token and uses it for the request to ensure idempotency. + idempotency of the request. If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. - `"description"`: A human-readable description of the streaming image. -- `"tags"`: A collection of labels, in the form of key:value pairs, that apply to this +- `"tags"`: A collection of labels, in the form of key-value pairs, that apply to this resource. """ function create_streaming_image( @@ -191,40 +194,45 @@ function create_streaming_image( end """ - create_streaming_session(studio_id) - create_streaming_session(studio_id, params::Dict{String,<:Any}) + create_streaming_session(launch_profile_id, studio_id) + create_streaming_session(launch_profile_id, studio_id, params::Dict{String,<:Any}) Creates a streaming session in a studio. After invoking this operation, you must poll -GetStreamingSession until the streaming session is in state READY. 
+GetStreamingSession until the streaming session is in the READY state. # Arguments +- `launch_profile_id`: The ID of the launch profile used to control access from the + streaming session. - `studio_id`: The studio ID. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If you don’t specify a client token, the AWS SDK - automatically generates a client token and uses it for the request to ensure idempotency. + idempotency of the request. If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. - `"ec2InstanceType"`: The EC2 Instance type used for the streaming session. -- `"launchProfileId"`: The launch profile ID. - `"ownedBy"`: The user ID of the user that owns the streaming session. The user that owns the session will be logging into the session and interacting with the virtual workstation. - `"streamingImageId"`: The ID of the streaming image. -- `"tags"`: A collection of labels, in the form of key:value pairs, that apply to this +- `"tags"`: A collection of labels, in the form of key-value pairs, that apply to this resource. 
""" function create_streaming_session( - studioId; aws_config::AbstractAWSConfig=global_aws_config() + launchProfileId, studioId; aws_config::AbstractAWSConfig=global_aws_config() ) return nimble( "POST", "/2020-08-01/studios/$(studioId)/streaming-sessions", - Dict{String,Any}("X-Amz-Client-Token" => string(uuid4())); + Dict{String,Any}( + "launchProfileId" => launchProfileId, "X-Amz-Client-Token" => string(uuid4()) + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function create_streaming_session( + launchProfileId, studioId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -234,7 +242,12 @@ function create_streaming_session( "/2020-08-01/studios/$(studioId)/streaming-sessions", Dict{String,Any}( mergewith( - _merge, Dict{String,Any}("X-Amz-Client-Token" => string(uuid4())), params + _merge, + Dict{String,Any}( + "launchProfileId" => launchProfileId, + "X-Amz-Client-Token" => string(uuid4()), + ), + params, ), ); aws_config=aws_config, @@ -248,7 +261,7 @@ end Creates a streaming session stream for a streaming session. After invoking this API, invoke GetStreamingSessionStream with the returned streamId to poll the resource until it is in -state READY. +the READY state. # Arguments - `session_id`: The streaming session ID. @@ -257,8 +270,9 @@ state READY. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If you don’t specify a client token, the AWS SDK - automatically generates a client token and uses it for the request to ensure idempotency. + idempotency of the request. If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. - `"expirationInSeconds"`: The expiration time in seconds. 
""" function create_streaming_session_stream( @@ -295,10 +309,10 @@ end create_studio(admin_role_arn, display_name, studio_name, user_role_arn) create_studio(admin_role_arn, display_name, studio_name, user_role_arn, params::Dict{String,<:Any}) -Create a new Studio. When creating a Studio, two IAM roles must be provided: the admin role -and the user Role. These roles are assumed by your users when they log in to the Nimble +Create a new studio. When creating a studio, two IAM roles must be provided: the admin role +and the user role. These roles are assumed by your users when they log in to the Nimble Studio portal. The user role must have the AmazonNimbleStudio-StudioUser managed policy -attached for the portal to function properly. The Admin Role must have the +attached for the portal to function properly. The admin role must have the AmazonNimbleStudio-StudioAdmin managed policy attached for the portal to function properly. You may optionally specify a KMS key in the StudioEncryptionConfiguration. In Nimble Studio, resource names, descriptions, initialization scripts, and other data you provide @@ -311,21 +325,22 @@ will no longer be accessible to your portal users. If you delete the studio KMS studio will no longer be accessible. # Arguments -- `admin_role_arn`: The IAM role that Studio Admins will assume when logging in to the +- `admin_role_arn`: The IAM role that studio admins will assume when logging in to the Nimble Studio portal. - `display_name`: A friendly name for the studio. - `studio_name`: The studio name that is used in the URL of the Nimble Studio portal when accessed by Nimble Studio users. -- `user_role_arn`: The IAM role that Studio Users will assume when logging in to the Nimble +- `user_role_arn`: The IAM role that studio users will assume when logging in to the Nimble Studio portal. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: - `"X-Amz-Client-Token"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If you don’t specify a client token, the AWS SDK - automatically generates a client token and uses it for the request to ensure idempotency. + idempotency of the request. If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. - `"studioEncryptionConfiguration"`: The studio encryption configuration. -- `"tags"`: A collection of labels, in the form of key:value pairs, that apply to this +- `"tags"`: A collection of labels, in the form of key-value pairs, that apply to this resource. """ function create_studio( @@ -392,21 +407,22 @@ Creates a studio component resource. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If you don’t specify a client token, the AWS SDK - automatically generates a client token and uses it for the request to ensure idempotency. + idempotency of the request. If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. - `"configuration"`: The configuration of the studio component, based on component type. - `"description"`: The description. - `"ec2SecurityGroupIds"`: The EC2 security groups that control access to the studio component. - `"initializationScripts"`: Initialization scripts for studio components. - `"runtimeRoleArn"`: An IAM role attached to a Studio Component that gives the studio - component access to AWS resources at anytime while the instance is running. + component access to Amazon Web Services resources at anytime while the instance is running. - `"scriptParameters"`: Parameters for the studio component scripts. 
- `"secureInitializationRoleArn"`: An IAM role attached to Studio Component when the system - initialization script runs which give the studio component access to AWS resources when the - system initialization script runs. + initialization script runs which give the studio component access to Amazon Web Services + resources when the system initialization script runs. - `"subtype"`: The specific subtype of a studio component. -- `"tags"`: A collection of labels, in the form of key:value pairs, that apply to this +- `"tags"`: A collection of labels, in the form of key-value pairs, that apply to this resource. """ function create_studio_component( @@ -453,14 +469,16 @@ end Permanently delete a launch profile. # Arguments -- `launch_profile_id`: The Launch Profile ID. +- `launch_profile_id`: The ID of the launch profile used to control access from the + streaming session. - `studio_id`: The studio ID. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If you don’t specify a client token, the AWS SDK - automatically generates a client token and uses it for the request to ensure idempotency. + idempotency of the request. If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. """ function delete_launch_profile( launchProfileId, studioId; aws_config::AbstractAWSConfig=global_aws_config() @@ -499,15 +517,17 @@ end Delete a user from launch profile membership. # Arguments -- `launch_profile_id`: The Launch Profile ID. +- `launch_profile_id`: The ID of the launch profile used to control access from the + streaming session. - `principal_id`: The principal ID. This currently supports a IAM Identity Center UserId. - `studio_id`: The studio ID. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If you don’t specify a client token, the AWS SDK - automatically generates a client token and uses it for the request to ensure idempotency. + idempotency of the request. If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. """ function delete_launch_profile_member( launchProfileId, @@ -556,8 +576,9 @@ Delete streaming image. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If you don’t specify a client token, the AWS SDK - automatically generates a client token and uses it for the request to ensure idempotency. + idempotency of the request. If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. """ function delete_streaming_image( streamingImageId, studioId; aws_config::AbstractAWSConfig=global_aws_config() @@ -604,8 +625,9 @@ count against your streaming session quota until it is marked DELETED. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If you don’t specify a client token, the AWS SDK - automatically generates a client token and uses it for the request to ensure idempotency. + idempotency of the request. If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. 
""" function delete_streaming_session( sessionId, studioId; aws_config::AbstractAWSConfig=global_aws_config() @@ -649,8 +671,9 @@ Delete a studio resource. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If you don’t specify a client token, the AWS SDK - automatically generates a client token and uses it for the request to ensure idempotency. + idempotency of the request. If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. """ function delete_studio(studioId; aws_config::AbstractAWSConfig=global_aws_config()) return nimble( @@ -692,8 +715,9 @@ Deletes a studio component resource. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If you don’t specify a client token, the AWS SDK - automatically generates a client token and uses it for the request to ensure idempotency. + idempotency of the request. If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. """ function delete_studio_component( studioComponentId, studioId; aws_config::AbstractAWSConfig=global_aws_config() @@ -738,8 +762,9 @@ Delete a user from studio membership. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If you don’t specify a client token, the AWS SDK - automatically generates a client token and uses it for the request to ensure idempotency. 
+ idempotency of the request. If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. """ function delete_studio_member( principalId, studioId; aws_config::AbstractAWSConfig=global_aws_config() @@ -775,7 +800,7 @@ end get_eula(eula_id) get_eula(eula_id, params::Dict{String,<:Any}) -Get Eula. +Get EULA. # Arguments - `eula_id`: The EULA ID. @@ -808,7 +833,8 @@ end Get a launch profile. # Arguments -- `launch_profile_id`: The Launch Profile ID. +- `launch_profile_id`: The ID of the launch profile used to control access from the + streaming session. - `studio_id`: The studio ID. """ @@ -847,7 +873,8 @@ description of all studio components used by the launch profiles, and the name a description of streaming images that can be used with this launch profile. # Arguments -- `launch_profile_id`: The Launch Profile ID. +- `launch_profile_id`: The ID of the launch profile used to control access from the + streaming session. - `studio_id`: The studio ID. """ @@ -883,11 +910,12 @@ end Get a launch profile initialization. # Arguments -- `launch_profile_id`: The Launch Profile ID. +- `launch_profile_id`: The ID of the launch profile used to control access from the + streaming session. - `launch_profile_protocol_versions`: The launch profile protocol versions supported by the client. - `launch_purpose`: The launch purpose. -- `platform`: The platform where this Launch Profile will be used, either WINDOWS or LINUX. +- `platform`: The platform where this Launch Profile will be used, either Windows or Linux. - `studio_id`: The studio ID. """ @@ -946,7 +974,8 @@ end Get a user persona in launch profile membership. # Arguments -- `launch_profile_id`: The Launch Profile ID. +- `launch_profile_id`: The ID of the launch profile used to control access from the + streaming session. - `principal_id`: The principal ID. This currently supports a IAM Identity Center UserId. 
- `studio_id`: The studio ID. @@ -1053,13 +1082,50 @@ function get_streaming_session( ) end +""" + get_streaming_session_backup(backup_id, studio_id) + get_streaming_session_backup(backup_id, studio_id, params::Dict{String,<:Any}) + +Gets StreamingSessionBackup resource. Invoke this operation to poll for a streaming session +backup while stopping a streaming session. + +# Arguments +- `backup_id`: The ID of the backup. +- `studio_id`: The studio ID. + +""" +function get_streaming_session_backup( + backupId, studioId; aws_config::AbstractAWSConfig=global_aws_config() +) + return nimble( + "GET", + "/2020-08-01/studios/$(studioId)/streaming-session-backups/$(backupId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_streaming_session_backup( + backupId, + studioId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return nimble( + "GET", + "/2020-08-01/studios/$(studioId)/streaming-session-backups/$(backupId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_streaming_session_stream(session_id, stream_id, studio_id) get_streaming_session_stream(session_id, stream_id, studio_id, params::Dict{String,<:Any}) Gets a StreamingSessionStream for a streaming session. Invoke this operation to poll the resource after invoking CreateStreamingSessionStream. After the StreamingSessionStream -changes to the state READY, the url property will contain a stream to be used with the DCV +changes to the READY state, the url property will contain a stream to be used with the DCV streaming client. # Arguments @@ -1098,7 +1164,7 @@ end get_studio(studio_id) get_studio(studio_id, params::Dict{String,<:Any}) -Get a Studio resource. +Get a studio resource. # Arguments - `studio_id`: The studio ID. @@ -1202,7 +1268,7 @@ end list_eula_acceptances(studio_id) list_eula_acceptances(studio_id, params::Dict{String,<:Any}) -List Eula Acceptances. +List EULA acceptances. 
# Arguments - `studio_id`: The studio ID. @@ -1210,7 +1276,8 @@ List Eula Acceptances. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"eulaIds"`: The list of EULA IDs that have been previously accepted. -- `"nextToken"`: The token to request the next page of results. +- `"nextToken"`: The token for the next set of results, or null if there are no more + results. """ function list_eula_acceptances(studioId; aws_config::AbstractAWSConfig=global_aws_config()) return nimble( @@ -1238,12 +1305,13 @@ end list_eulas() list_eulas(params::Dict{String,<:Any}) -List Eulas. +List EULAs. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"eulaIds"`: The list of EULA IDs that should be returned -- `"nextToken"`: The token to request the next page of results. +- `"nextToken"`: The token for the next set of results, or null if there are no more + results. """ function list_eulas(; aws_config::AbstractAWSConfig=global_aws_config()) return nimble( @@ -1269,13 +1337,15 @@ end Get all users in a given launch profile membership. # Arguments -- `launch_profile_id`: The Launch Profile ID. +- `launch_profile_id`: The ID of the launch profile used to control access from the + streaming session. - `studio_id`: The studio ID. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The max number of results to return in the response. -- `"nextToken"`: The token to request the next page of results. +- `"nextToken"`: The token for the next set of results, or null if there are no more + results. """ function list_launch_profile_members( launchProfileId, studioId; aws_config::AbstractAWSConfig=global_aws_config() @@ -1314,7 +1384,8 @@ List all the launch profiles a studio. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: - `"maxResults"`: The max number of results to return in the response. -- `"nextToken"`: The token to request the next page of results. +- `"nextToken"`: The token for the next set of results, or null if there are no more + results. - `"principalId"`: The principal ID. This currently supports a IAM Identity Center UserId. - `"states"`: Filter this request to launch profiles in any of the given states. """ @@ -1353,7 +1424,8 @@ in your studio. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"nextToken"`: The token to request the next page of results. +- `"nextToken"`: The token for the next set of results, or null if there are no more + results. - `"owner"`: Filter this request to streaming images with the given owner """ function list_streaming_images(studioId; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1378,6 +1450,45 @@ function list_streaming_images( ) end +""" + list_streaming_session_backups(studio_id) + list_streaming_session_backups(studio_id, params::Dict{String,<:Any}) + +Lists the backups of a streaming session in a studio. + +# Arguments +- `studio_id`: The studio ID. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"nextToken"`: The token for the next set of results, or null if there are no more + results. +- `"ownedBy"`: The user ID of the user that owns the streaming session. 
+""" +function list_streaming_session_backups( + studioId; aws_config::AbstractAWSConfig=global_aws_config() +) + return nimble( + "GET", + "/2020-08-01/studios/$(studioId)/streaming-session-backups"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_streaming_session_backups( + studioId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return nimble( + "GET", + "/2020-08-01/studios/$(studioId)/streaming-session-backups", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_streaming_sessions(studio_id) list_streaming_sessions(studio_id, params::Dict{String,<:Any}) @@ -1390,7 +1501,8 @@ Lists the streaming sessions in a studio. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"createdBy"`: Filters the request to streaming sessions created by the given user. -- `"nextToken"`: The token to request the next page of results. +- `"nextToken"`: The token for the next set of results, or null if there are no more + results. - `"ownedBy"`: Filters the request to streaming session owned by the given user - `"sessionIds"`: Filters the request to only the provided session IDs. """ @@ -1430,7 +1542,8 @@ Lists the StudioComponents in a studio. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The max number of results to return in the response. -- `"nextToken"`: The token to request the next page of results. +- `"nextToken"`: The token for the next set of results, or null if there are no more + results. - `"states"`: Filters the request to studio components that are in one of the given states. - `"types"`: Filters the request to studio components that are of one of the given types. """ @@ -1468,7 +1581,8 @@ Get all users in a given studio membership. 
ListStudioMembers only returns adm # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The max number of results to return in the response. -- `"nextToken"`: The token to request the next page of results. +- `"nextToken"`: The token for the next set of results, or null if there are no more + results. """ function list_studio_members(studioId; aws_config::AbstractAWSConfig=global_aws_config()) return nimble( @@ -1496,12 +1610,13 @@ end list_studios() list_studios(params::Dict{String,<:Any}) -List studios in your Amazon Web Services account in the requested Amazon Web Services +List studios in your Amazon Web Services accounts in the requested Amazon Web Services Region. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"nextToken"`: The token to request the next page of results. +- `"nextToken"`: The token for the next set of results, or null if there are no more + results. """ function list_studios(; aws_config::AbstractAWSConfig=global_aws_config()) return nimble( @@ -1567,15 +1682,17 @@ Add/update users with given persona to launch profile membership. # Arguments - `identity_store_id`: The ID of the identity store. -- `launch_profile_id`: The Launch Profile ID. +- `launch_profile_id`: The ID of the launch profile used to control access from the + streaming session. - `members`: A list of members. - `studio_id`: The studio ID. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If you don’t specify a client token, the AWS SDK - automatically generates a client token and uses it for the request to ensure idempotency. + idempotency of the request. 
If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. """ function put_launch_profile_members( identityStoreId, @@ -1637,8 +1754,9 @@ Add/update users with given persona to studio membership. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If you don’t specify a client token, the AWS SDK - automatically generates a client token and uses it for the request to ensure idempotency. + idempotency of the request. If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. """ function put_studio_members( identityStoreId, members, studioId; aws_config::AbstractAWSConfig=global_aws_config() @@ -1685,7 +1803,7 @@ end start_streaming_session(session_id, studio_id) start_streaming_session(session_id, studio_id, params::Dict{String,<:Any}) - Transitions sessions from the STOPPED state into the READY state. The START_IN_PROGRESS +Transitions sessions from the STOPPED state into the READY state. The START_IN_PROGRESS state is the intermediate state between the STOPPED and READY states. # Arguments @@ -1695,8 +1813,10 @@ state is the intermediate state between the STOPPED and READY states. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If you don’t specify a client token, the AWS SDK - automatically generates a client token and uses it for the request to ensure idempotency. + idempotency of the request. 
If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. +- `"backupId"`: The ID of the backup. """ function start_streaming_session( sessionId, studioId; aws_config::AbstractAWSConfig=global_aws_config() @@ -1746,8 +1866,9 @@ administrators and users to your studio. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If you don’t specify a client token, the AWS SDK - automatically generates a client token and uses it for the request to ensure idempotency. + idempotency of the request. If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. """ function start_studio_ssoconfiguration_repair( studioId; aws_config::AbstractAWSConfig=global_aws_config() @@ -1792,8 +1913,11 @@ state is the intermediate state between the READY and STOPPED states. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If you don’t specify a client token, the AWS SDK - automatically generates a client token and uses it for the request to ensure idempotency. + idempotency of the request. If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. +- `"volumeRetentionMode"`: Adds additional instructions to a streaming session stop action + to either retain the EBS volumes or delete the EBS volumes. 
""" function stop_streaming_session( sessionId, studioId; aws_config::AbstractAWSConfig=global_aws_config() @@ -1832,11 +1956,11 @@ end Creates tags for a resource, given its ARN. # Arguments -- `resource_arn`: The Amazon Resource Name (ARN) of the resource you want to add tags to. +- `resource_arn`: The Amazon Resource Name (ARN) of the resource you want to add tags to. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"tags"`: A collection of labels, in the form of key:value pairs, that apply to this +- `"tags"`: A collection of labels, in the form of key-value pairs, that apply to this resource. """ function tag_resource(resourceArn; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1906,14 +2030,16 @@ end Update a launch profile. # Arguments -- `launch_profile_id`: The Launch Profile ID. +- `launch_profile_id`: The ID of the launch profile used to control access from the + streaming session. - `studio_id`: The studio ID. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If you don’t specify a client token, the AWS SDK - automatically generates a client token and uses it for the request to ensure idempotency. + idempotency of the request. If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. - `"description"`: The description. - `"launchProfileProtocolVersions"`: The version number of the protocol that is used by the launch profile. The only valid version is \"2021-03-31\". @@ -1959,7 +2085,8 @@ end Update a user persona in launch profile membership. # Arguments -- `launch_profile_id`: The Launch Profile ID. +- `launch_profile_id`: The ID of the launch profile used to control access from the + streaming session. 
- `persona`: The persona. - `principal_id`: The principal ID. This currently supports a IAM Identity Center UserId. - `studio_id`: The studio ID. @@ -1967,8 +2094,9 @@ Update a user persona in launch profile membership. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If you don’t specify a client token, the AWS SDK - automatically generates a client token and uses it for the request to ensure idempotency. + idempotency of the request. If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. """ function update_launch_profile_member( launchProfileId, @@ -2023,8 +2151,9 @@ Update streaming image. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If you don’t specify a client token, the AWS SDK - automatically generates a client token and uses it for the request to ensure idempotency. + idempotency of the request. If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. - `"description"`: The description. - `"name"`: The name for the streaming image. """ @@ -2071,8 +2200,9 @@ of your studio. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If you don’t specify a client token, the AWS SDK - automatically generates a client token and uses it for the request to ensure idempotency. + idempotency of the request. 
If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. - `"adminRoleArn"`: The IAM role that Studio Admins will assume when logging in to the Nimble Studio portal. - `"displayName"`: A friendly name for the studio. @@ -2119,8 +2249,9 @@ Updates a studio component resource. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: Unique, case-sensitive identifier that you provide to ensure the - idempotency of the request. If you don’t specify a client token, the AWS SDK - automatically generates a client token and uses it for the request to ensure idempotency. + idempotency of the request. If you don’t specify a client token, the Amazon Web Services + SDK automatically generates a client token and uses it for the request to ensure + idempotency. - `"configuration"`: The configuration of the studio component, based on component type. - `"description"`: The description. - `"ec2SecurityGroupIds"`: The EC2 security groups that control access to the studio @@ -2128,11 +2259,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"initializationScripts"`: Initialization scripts for studio components. - `"name"`: The name for the studio component. - `"runtimeRoleArn"`: An IAM role attached to a Studio Component that gives the studio - component access to AWS resources at anytime while the instance is running. + component access to Amazon Web Services resources at anytime while the instance is running. - `"scriptParameters"`: Parameters for the studio component scripts. - `"secureInitializationRoleArn"`: An IAM role attached to Studio Component when the system - initialization script runs which give the studio component access to AWS resources when the - system initialization script runs. 
+ initialization script runs which give the studio component access to Amazon Web Services + resources when the system initialization script runs. - `"subtype"`: The specific subtype of a studio component. - `"type"`: The type of the studio component. """ diff --git a/src/services/rds.jl b/src/services/rds.jl index e40c438e8e..b0f45954c9 100644 --- a/src/services/rds.jl +++ b/src/services/rds.jl @@ -981,50 +981,39 @@ function create_blue_green_deployment( end """ - create_custom_dbengine_version(database_installation_files_s3_bucket_name, engine, engine_version, kmskey_id, manifest) - create_custom_dbengine_version(database_installation_files_s3_bucket_name, engine, engine_version, kmskey_id, manifest, params::Dict{String,<:Any}) - -Creates a custom DB engine version (CEV). A CEV is a binary volume snapshot of a database -engine and specific AMI. The supported engines are the following: Oracle Database 12.1 -Enterprise Edition with the January 2021 or later RU/RUR Oracle Database 19c Enterprise -Edition with the January 2021 or later RU/RUR Amazon RDS, which is a fully managed -service, supplies the Amazon Machine Image (AMI) and database software. The Amazon RDS -database software is preinstalled, so you need only select a DB engine and version, and -create your database. With Amazon RDS Custom for Oracle, you upload your database -installation files in Amazon S3. When you create a custom engine version, you specify the -files in a JSON document called a CEV manifest. This document describes installation .zip -files stored in Amazon S3. RDS Custom creates your CEV from the installation files that you -provided. This service model is called Bring Your Own Media (BYOM). Creation takes -approximately two hours. If creation fails, RDS Custom issues RDS-EVENT-0196 with the -message Creation failed for custom engine version, and includes details about the failure. -For example, the event prints missing files. After you create the CEV, it is available for -use. 
You can create multiple CEVs, and create multiple RDS Custom instances from any CEV. -You can also change the status of a CEV to make it available or inactive. The MediaImport -service that imports files from Amazon S3 to create CEVs isn't integrated with Amazon Web -Services CloudTrail. If you turn on data logging for Amazon RDS in CloudTrail, calls to the -CreateCustomDbEngineVersion event aren't logged. However, you might see calls from the API -gateway that accesses your Amazon S3 bucket. These calls originate from the MediaImport -service for the CreateCustomDbEngineVersion event. For more information, see Creating a -CEV in the Amazon RDS User Guide. + create_custom_dbengine_version(engine, engine_version) + create_custom_dbengine_version(engine, engine_version, params::Dict{String,<:Any}) + +Creates a custom DB engine version (CEV). # Arguments -- `database_installation_files_s3_bucket_name`: The name of an Amazon S3 bucket that - contains database installation files for your CEV. For example, a valid bucket name is - my-custom-installation-files. - `engine`: The database engine to use for your custom engine version (CEV). The only supported value is custom-oracle-ee. - `engine_version`: The name of your CEV. The name format is 19.customized_string. For example, a valid CEV name is 19.my_cev1. This setting is required for RDS Custom for Oracle, but optional for Amazon RDS. The combination of Engine and EngineVersion is unique per customer per Region. -- `kmskey_id`: The Amazon Web Services KMS key identifier for an encrypted CEV. A symmetric - encryption KMS key is required for RDS Custom, but optional for Amazon RDS. If you have an - existing symmetric encryption KMS key in your account, you can use it with RDS Custom. No - further action is necessary. 
If you don't already have a symmetric encryption KMS key in - your account, follow the instructions in Creating a symmetric encryption KMS key in the - Amazon Web Services Key Management Service Developer Guide. You can choose the same - symmetric encryption key when you create a CEV and a DB instance, or choose different keys. -- `manifest`: The CEV manifest, which is a JSON document that describes the installation + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DatabaseInstallationFilesS3BucketName"`: The name of an Amazon S3 bucket that contains + database installation files for your CEV. For example, a valid bucket name is + my-custom-installation-files. +- `"DatabaseInstallationFilesS3Prefix"`: The Amazon S3 directory that contains the database + installation files for your CEV. For example, a valid bucket name is 123456789012/cev1. If + this setting isn't specified, no prefix is assumed. +- `"Description"`: An optional description of your CEV. +- `"ImageId"`: The ID of the AMI. An AMI ID is required to create a CEV for RDS Custom for + SQL Server. +- `"KMSKeyId"`: The Amazon Web Services KMS key identifier for an encrypted CEV. A + symmetric encryption KMS key is required for RDS Custom, but optional for Amazon RDS. If + you have an existing symmetric encryption KMS key in your account, you can use it with RDS + Custom. No further action is necessary. If you don't already have a symmetric encryption + KMS key in your account, follow the instructions in Creating a symmetric encryption KMS + key in the Amazon Web Services Key Management Service Developer Guide. You can choose the + same symmetric encryption key when you create a CEV and a DB instance, or choose different + keys. +- `"Manifest"`: The CEV manifest, which is a JSON document that describes the installation .zip files stored in Amazon S3. Specify the name/value pairs in a file or a quoted string. 
RDS Custom applies the patches in the order in which they are listed. The following JSON fields are valid: MediaImportTemplateVersion Version of the CEV manifest. The date is in @@ -1034,43 +1023,21 @@ CEV in the Amazon RDS User Guide. The patches that are not in the list of PSU and RU patches. Amazon RDS applies these patches after applying the PSU and RU patches. For more information, see Creating the CEV manifest in the Amazon RDS User Guide. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"DatabaseInstallationFilesS3Prefix"`: The Amazon S3 directory that contains the database - installation files for your CEV. For example, a valid bucket name is 123456789012/cev1. If - this setting isn't specified, no prefix is assumed. -- `"Description"`: An optional description of your CEV. - `"Tags"`: """ function create_custom_dbengine_version( - DatabaseInstallationFilesS3BucketName, - Engine, - EngineVersion, - KMSKeyId, - Manifest; - aws_config::AbstractAWSConfig=global_aws_config(), + Engine, EngineVersion; aws_config::AbstractAWSConfig=global_aws_config() ) return rds( "CreateCustomDBEngineVersion", - Dict{String,Any}( - "DatabaseInstallationFilesS3BucketName" => - DatabaseInstallationFilesS3BucketName, - "Engine" => Engine, - "EngineVersion" => EngineVersion, - "KMSKeyId" => KMSKeyId, - "Manifest" => Manifest, - ); + Dict{String,Any}("Engine" => Engine, "EngineVersion" => EngineVersion); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function create_custom_dbengine_version( - DatabaseInstallationFilesS3BucketName, Engine, EngineVersion, - KMSKeyId, - Manifest, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -1079,14 +1046,7 @@ function create_custom_dbengine_version( Dict{String,Any}( mergewith( _merge, - Dict{String,Any}( - "DatabaseInstallationFilesS3BucketName" => - DatabaseInstallationFilesS3BucketName, - "Engine" => Engine, - "EngineVersion" => 
EngineVersion, - "KMSKeyId" => KMSKeyId, - "Manifest" => Manifest, - ), + Dict{String,Any}("Engine" => Engine, "EngineVersion" => EngineVersion), params, ), ); @@ -1254,9 +1214,29 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys identifier that is valid in the destination Amazon Web Services Region. This KMS key is used to encrypt the read replica in that Amazon Web Services Region. Valid for: Aurora DB clusters and Multi-AZ DB clusters +- `"ManageMasterUserPassword"`: A value that indicates whether to manage the master user + password with Amazon Web Services Secrets Manager. For more information, see Password + management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide and + Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User + Guide. Constraints: Can't manage the master user password with Amazon Web Services + Secrets Manager if MasterUserPassword is specified. Valid for: Aurora DB clusters and + Multi-AZ DB clusters - `"MasterUserPassword"`: The password for the master database user. This password can - contain any printable ASCII character except \"/\", \"\"\", or \"@\". Constraints: Must - contain from 8 to 41 characters. Valid for: Aurora DB clusters and Multi-AZ DB clusters + contain any printable ASCII character except \"/\", \"\"\", or \"@\". Constraints: Must + contain from 8 to 41 characters. Can't be specified if ManageMasterUserPassword is turned + on. Valid for: Aurora DB clusters and Multi-AZ DB clusters +- `"MasterUserSecretKmsKeyId"`: The Amazon Web Services KMS key identifier to encrypt a + secret that is automatically generated and managed in Amazon Web Services Secrets Manager. + This setting is valid only if the master user password is managed by RDS in Amazon Web + Services Secrets Manager for the DB cluster. The Amazon Web Services KMS key identifier is + the key ARN, key ID, alias ARN, or alias name for the KMS key. 
To use a KMS key in a + different Amazon Web Services account, specify the key ARN or alias ARN. If you don't + specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt + the secret. If the secret is in a different Amazon Web Services account, then you can't use + the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed + KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web + Services account has a different default KMS key for each Amazon Web Services Region. Valid + for: Aurora DB clusters and Multi-AZ DB clusters - `"MasterUsername"`: The name of the master user for the DB cluster. Constraints: Must be 1 to 16 letters or numbers. First character must be a letter. Can't be a reserved word for the chosen database engine. Valid for: Aurora DB clusters and Multi-AZ DB @@ -1708,6 +1688,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Possible values are outposts (Amazon Web Services Outposts) and region (Amazon Web Services Region). The default is region. For more information, see Working with Amazon RDS on Amazon Web Services Outposts in the Amazon RDS User Guide. +- `"CACertificateIdentifier"`: Specifies the CA certificate identifier to use for the DB + instance’s server certificate. This setting doesn't apply to RDS Custom. For more + information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS + User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora + User Guide. - `"CharacterSetName"`: For supported engines, this value indicates that the DB instance should be associated with the specified CharacterSet. This setting doesn't apply to RDS Custom. However, if you need to change the character set, you can change it on the database @@ -1847,13 +1832,30 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys - `"LicenseModel"`: License model information for this DB instance. Valid values: license-included | bring-your-own-license | general-public-license This setting doesn't apply to RDS Custom. Amazon Aurora Not applicable. +- `"ManageMasterUserPassword"`: A value that indicates whether to manage the master user + password with Amazon Web Services Secrets Manager. For more information, see Password + management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide. + Constraints: Can't manage the master user password with Amazon Web Services Secrets + Manager if MasterUserPassword is specified. - `"MasterUserPassword"`: The password for the master user. The password can include any printable ASCII character except \"/\", \"\"\", or \"@\". Amazon Aurora Not applicable. - The password for the master user is managed by the DB cluster. MariaDB Constraints: Must - contain from 8 to 41 characters. Microsoft SQL Server Constraints: Must contain from 8 to - 128 characters. MySQL Constraints: Must contain from 8 to 41 characters. Oracle + The password for the master user is managed by the DB cluster. Constraints: Can't be + specified if ManageMasterUserPassword is turned on. MariaDB Constraints: Must contain + from 8 to 41 characters. Microsoft SQL Server Constraints: Must contain from 8 to 128 + characters. MySQL Constraints: Must contain from 8 to 41 characters. Oracle Constraints: Must contain from 8 to 30 characters. PostgreSQL Constraints: Must contain from 8 to 128 characters. +- `"MasterUserSecretKmsKeyId"`: The Amazon Web Services KMS key identifier to encrypt a + secret that is automatically generated and managed in Amazon Web Services Secrets Manager. + This setting is valid only if the master user password is managed by RDS in Amazon Web + Services Secrets Manager for the DB instance. The Amazon Web Services KMS key identifier is + the key ARN, key ID, alias ARN, or alias name for the KMS key. 
To use a KMS key in a + different Amazon Web Services account, specify the key ARN or alias ARN. If you don't + specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt + the secret. If the secret is in a different Amazon Web Services account, then you can't use + the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed + KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web + Services account has a different default KMS key for each Amazon Web Services Region. - `"MasterUsername"`: The name for the master user. Amazon Aurora Not applicable. The name for the master user is managed by the DB cluster. Amazon RDS Constraints: Required. Must be 1 to 16 letters, numbers, or underscores. First character must be a @@ -2100,6 +2102,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. +- `"EnableCustomerOwnedIp"`: A value that indicates whether to enable a customer-owned IP + address (CoIP) for an RDS on Outposts read replica. A CoIP provides local or external + connectivity to resources in your Outpost subnets through your on-premises network. For + some use cases, a CoIP can provide lower latency for connections to the read replica from + outside of its virtual private cloud (VPC) on your local network. For more information + about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts in the + Amazon RDS User Guide. For more information about CoIPs, see Customer-owned IP addresses in + the Amazon Web Services Outposts User Guide. 
- `"EnableIAMDatabaseAuthentication"`: A value that indicates whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information about IAM database authentication, see @@ -3800,7 +3810,9 @@ end describe_certificates(params::Dict{String,<:Any}) Lists the set of CA certificates provided by Amazon RDS for this Amazon Web Services -account. +account. For more information, see Using SSL/TLS to encrypt a connection to a DB instance +in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in +the Amazon Aurora User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -6151,9 +6163,37 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys valid IOPS values, see Amazon RDS Provisioned IOPS storage in the Amazon RDS User Guide. Constraints: Must be a multiple between .5 and 50 of the storage amount for the DB cluster. Valid for: Multi-AZ DB clusters only +- `"ManageMasterUserPassword"`: A value that indicates whether to manage the master user + password with Amazon Web Services Secrets Manager. If the DB cluster doesn't manage the + master user password with Amazon Web Services Secrets Manager, you can turn on this + management. In this case, you can't specify MasterUserPassword. If the DB cluster already + manages the master user password with Amazon Web Services Secrets Manager, and you specify + that the master user password is not managed with Amazon Web Services Secrets Manager, then + you must specify MasterUserPassword. In this case, RDS deletes the secret and uses the new + password for the master user specified by MasterUserPassword. 
For more information, see + Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide + and Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User + Guide. Valid for: Aurora DB clusters and Multi-AZ DB clusters - `"MasterUserPassword"`: The new password for the master database user. This password can - contain any printable ASCII character except \"/\", \"\"\", or \"@\". Constraints: Must - contain from 8 to 41 characters. Valid for: Aurora DB clusters and Multi-AZ DB clusters + contain any printable ASCII character except \"/\", \"\"\", or \"@\". Constraints: Must + contain from 8 to 41 characters. Can't be specified if ManageMasterUserPassword is turned + on. Valid for: Aurora DB clusters and Multi-AZ DB clusters +- `"MasterUserSecretKmsKeyId"`: The Amazon Web Services KMS key identifier to encrypt a + secret that is automatically generated and managed in Amazon Web Services Secrets Manager. + This setting is valid only if both of the following conditions are met: The DB cluster + doesn't manage the master user password in Amazon Web Services Secrets Manager. If the DB + cluster already manages the master user password in Amazon Web Services Secrets Manager, + you can't change the KMS key that is used to encrypt the secret. You are turning on + ManageMasterUserPassword to manage the master user password in Amazon Web Services Secrets + Manager. If you are turning on ManageMasterUserPassword and don't specify + MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the + secret. If the secret is in a different Amazon Web Services account, then you can't use the + aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS + key. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or + alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, + specify the key ARN or alias ARN. 
There is a default KMS key for your Amazon Web Services + account. Your Amazon Web Services account has a different default KMS key for each Amazon + Web Services Region. Valid for: Aurora DB clusters and Multi-AZ DB clusters - `"MonitoringInterval"`: The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0. The default is 0. If MonitoringRoleArn is specified, also set @@ -6174,7 +6214,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys renaming a DB cluster. This value is stored as a lowercase string. Constraints: Must contain from 1 to 63 letters, numbers, or hyphens The first character must be a letter Can't end with a hyphen or contain two consecutive hyphens Example: my-cluster2 Valid - for: Aurora DB clusters only + for: Aurora DB clusters and Multi-AZ DB clusters - `"OptionGroupName"`: A value that indicates that the DB cluster should be associated with the specified option group. DB clusters are associated with a default option group that can't be modified. @@ -6209,6 +6249,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys see Adjusting the Preferred DB Cluster Maintenance Window in the Amazon Aurora User Guide. Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun. Constraints: Minimum 30-minute window. Valid for: Aurora DB clusters and Multi-AZ DB clusters +- `"RotateMasterUserPassword"`: A value that indicates whether to rotate the secret managed + by Amazon Web Services Secrets Manager for the master user password. This setting is valid + only if the master user password is managed by RDS in Amazon Web Services Secrets Manager + for the DB cluster. The secret value contains the updated password. 
For more information, + see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User + Guide and Password management with Amazon Web Services Secrets Manager in the Amazon Aurora + User Guide. Constraints: You must apply the change immediately when rotating the master + user password. Valid for: Aurora DB clusters and Multi-AZ DB clusters - `"ScalingConfiguration"`: The scaling properties of the DB cluster. You can only modify scaling properties for DB clusters in serverless DB engine mode. Valid for: Aurora DB clusters only @@ -6525,8 +6573,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Oracle DB instance. It can be specified for a MySQL read replica only if the source is running MySQL 5.6 or later. It can be specified for a PostgreSQL read replica only if the source is running PostgreSQL 9.3.5. -- `"CACertificateIdentifier"`: Specifies the certificate to associate with the DB instance. - This setting doesn't apply to RDS Custom. +- `"CACertificateIdentifier"`: Specifies the CA certificate identifier to use for the DB + instance’s server certificate. This setting doesn't apply to RDS Custom. For more + information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS + User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora + User Guide. - `"CertificateRotationRestart"`: A value that indicates whether the DB instance is restarted when you rotate your SSL/TLS certificate. By default, the DB instance is restarted when you rotate your SSL/TLS certificate. The certificate is not updated until @@ -6550,11 +6601,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DBInstanceClass"`: The new compute and memory capacity of the DB instance, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. 
For the full list of DB instance classes, and availability for - your engine, see DB instance classes in the Amazon RDS User Guide or Aurora DB instance - classes in the Amazon Aurora User Guide. If you modify the DB instance class, an outage - occurs during the change. The change is applied during the next maintenance window, unless - ApplyImmediately is enabled for this request. This setting doesn't apply to RDS Custom for - Oracle. Default: Uses existing setting + your engine, see DB Instance Class in the Amazon RDS User Guide or Aurora DB instance + classes in the Amazon Aurora User Guide. For RDS Custom, see DB instance class support for + RDS Custom for Oracle and DB instance class support for RDS Custom for SQL Server. If you + modify the DB instance class, an outage occurs during the change. The change is applied + during the next maintenance window, unless you specify ApplyImmediately in your request. + Default: Uses existing setting - `"DBParameterGroupName"`: The name of the DB parameter group to apply to the DB instance. Changing this setting doesn't result in an outage. The parameter group name itself is changed immediately, but the actual parameter changes are not applied until you reboot the @@ -6645,6 +6697,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"LicenseModel"`: The license model for the DB instance. This setting doesn't apply to RDS Custom. Valid values: license-included | bring-your-own-license | general-public-license +- `"ManageMasterUserPassword"`: A value that indicates whether to manage the master user + password with Amazon Web Services Secrets Manager. If the DB cluster doesn't manage the + master user password with Amazon Web Services Secrets Manager, you can turn on this + management. In this case, you can't specify MasterUserPassword. 
If the DB cluster already + manages the master user password with Amazon Web Services Secrets Manager, and you specify + that the master user password is not managed with Amazon Web Services Secrets Manager, then + you must specify MasterUserPassword. In this case, RDS deletes the secret and uses the new + password for the master user specified by MasterUserPassword. For more information, see + Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide. + Constraints: Can't manage the master user password with Amazon Web Services Secrets + Manager if MasterUserPassword is specified. - `"MasterUserPassword"`: The new password for the master user. The password can include any printable ASCII character except \"/\", \"\"\", or \"@\". Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible. @@ -6652,13 +6715,30 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys element exists in the PendingModifiedValues element of the operation response. This setting doesn't apply to RDS Custom. Amazon Aurora Not applicable. The password for the master user is managed by the DB cluster. For more information, see ModifyDBCluster. Default: Uses - existing setting MariaDB Constraints: Must contain from 8 to 41 characters. Microsoft - SQL Server Constraints: Must contain from 8 to 128 characters. MySQL Constraints: Must - contain from 8 to 41 characters. Oracle Constraints: Must contain from 8 to 30 - characters. PostgreSQL Constraints: Must contain from 8 to 128 characters. Amazon RDS - API operations never return the password, so this action provides a way to regain access to - a primary instance user if the password is lost. This includes restoring privileges that - might have been accidentally revoked. + existing setting Constraints: Can't be specified if ManageMasterUserPassword is turned on. + MariaDB Constraints: Must contain from 8 to 41 characters. 
Microsoft SQL Server + Constraints: Must contain from 8 to 128 characters. MySQL Constraints: Must contain from + 8 to 41 characters. Oracle Constraints: Must contain from 8 to 30 characters. PostgreSQL + Constraints: Must contain from 8 to 128 characters. Amazon RDS API operations never + return the password, so this action provides a way to regain access to a primary instance + user if the password is lost. This includes restoring privileges that might have been + accidentally revoked. +- `"MasterUserSecretKmsKeyId"`: The Amazon Web Services KMS key identifier to encrypt a + secret that is automatically generated and managed in Amazon Web Services Secrets Manager. + This setting is valid only if both of the following conditions are met: The DB instance + doesn't manage the master user password in Amazon Web Services Secrets Manager. If the DB + instance already manages the master user password in Amazon Web Services Secrets Manager, + you can't change the KMS key used to encrypt the secret. You are turning on + ManageMasterUserPassword to manage the master user password in Amazon Web Services Secrets + Manager. If you are turning on ManageMasterUserPassword and don't specify + MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the + secret. If the secret is in a different Amazon Web Services account, then you can't use the + aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS + key. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or + alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, + specify the key ARN or alias ARN. There is a default KMS key for your Amazon Web Services + account. Your Amazon Web Services account has a different default KMS key for each Amazon + Web Services Region. - `"MaxAllocatedStorage"`: The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance. 
For more information about this setting, including limitations that apply to it, see Managing capacity automatically with @@ -6762,6 +6842,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ResumeFullAutomationModeMinutes"`: The number of minutes to pause the automation. When the time period ends, RDS Custom resumes full automation. The minimum value is 60 (default). The maximum value is 1,440. +- `"RotateMasterUserPassword"`: A value that indicates whether to rotate the secret managed + by Amazon Web Services Secrets Manager for the master user password. This setting is valid + only if the master user password is managed by RDS in Amazon Web Services Secrets Manager + for the DB cluster. The secret value contains the updated password. For more information, + see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User + Guide. Constraints: You must apply the change immediately when rotating the master user + password. - `"StorageThroughput"`: Specifies the storage throughput value for the DB instance. This setting applies only to the gp3 storage type. This setting doesn't apply to RDS Custom or Amazon Aurora. 
@@ -8039,8 +8126,8 @@ function reset_dbparameter_group( end """ - restore_dbcluster_from_s3(dbcluster_identifier, engine, master_user_password, master_username, s3_bucket_name, s3_ingestion_role_arn, source_engine, source_engine_version) - restore_dbcluster_from_s3(dbcluster_identifier, engine, master_user_password, master_username, s3_bucket_name, s3_ingestion_role_arn, source_engine, source_engine_version, params::Dict{String,<:Any}) + restore_dbcluster_from_s3(dbcluster_identifier, engine, master_username, s3_bucket_name, s3_ingestion_role_arn, source_engine, source_engine_version) + restore_dbcluster_from_s3(dbcluster_identifier, engine, master_username, s3_bucket_name, s3_ingestion_role_arn, source_engine, source_engine_version, params::Dict{String,<:Any}) Creates an Amazon Aurora DB cluster from MySQL data stored in an Amazon S3 bucket. Amazon RDS must be authorized to access the Amazon S3 bucket and the data must be created using @@ -8061,9 +8148,6 @@ This action only applies to Aurora DB clusters. The source DB engine must be MyS - `engine`: The name of the database engine to be used for this DB cluster. Valid Values: aurora (for MySQL 5.6-compatible Aurora) and aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora) -- `master_user_password`: The password for the master database user. This password can - contain any printable ASCII character except \"/\", \"\"\", or \"@\". Constraints: Must - contain from 8 to 41 characters. - `master_username`: The name of the master user for the restored DB cluster. Constraints: Must be 1 to 16 letters or numbers. First character must be a letter. Can't be a reserved word for the chosen database engine. @@ -8133,6 +8217,27 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys value for the KmsKeyId parameter, then Amazon RDS will use your default KMS key. There is a default KMS key for your Amazon Web Services account. 
Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. +- `"ManageMasterUserPassword"`: A value that indicates whether to manage the master user + password with Amazon Web Services Secrets Manager. For more information, see Password + management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide and + Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User + Guide. Constraints: Can't manage the master user password with Amazon Web Services + Secrets Manager if MasterUserPassword is specified. +- `"MasterUserPassword"`: The password for the master database user. This password can + contain any printable ASCII character except \"/\", \"\"\", or \"@\". Constraints: Must + contain from 8 to 41 characters. Can't be specified if ManageMasterUserPassword is turned + on. +- `"MasterUserSecretKmsKeyId"`: The Amazon Web Services KMS key identifier to encrypt a + secret that is automatically generated and managed in Amazon Web Services Secrets Manager. + This setting is valid only if the master user password is managed by RDS in Amazon Web + Services Secrets Manager for the DB cluster. The Amazon Web Services KMS key identifier is + the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a + different Amazon Web Services account, specify the key ARN or alias ARN. If you don't + specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt + the secret. If the secret is in a different Amazon Web Services account, then you can't use + the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed + KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web + Services account has a different default KMS key for each Amazon Web Services Region. - `"NetworkType"`: The network type of the DB cluster. 
Valid values: IPV4 DUAL The network type is determined by the DBSubnetGroup specified for the DB cluster. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). @@ -8169,7 +8274,6 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys function restore_dbcluster_from_s3( DBClusterIdentifier, Engine, - MasterUserPassword, MasterUsername, S3BucketName, S3IngestionRoleArn, @@ -8182,7 +8286,6 @@ function restore_dbcluster_from_s3( Dict{String,Any}( "DBClusterIdentifier" => DBClusterIdentifier, "Engine" => Engine, - "MasterUserPassword" => MasterUserPassword, "MasterUsername" => MasterUsername, "S3BucketName" => S3BucketName, "S3IngestionRoleArn" => S3IngestionRoleArn, @@ -8196,7 +8299,6 @@ end function restore_dbcluster_from_s3( DBClusterIdentifier, Engine, - MasterUserPassword, MasterUsername, S3BucketName, S3IngestionRoleArn, @@ -8213,7 +8315,6 @@ function restore_dbcluster_from_s3( Dict{String,Any}( "DBClusterIdentifier" => DBClusterIdentifier, "Engine" => Engine, - "MasterUserPassword" => MasterUserPassword, "MasterUsername" => MasterUsername, "S3BucketName" => S3BucketName, "S3IngestionRoleArn" => S3IngestionRoleArn, @@ -8902,9 +9003,29 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. - `"LicenseModel"`: The license model for this DB instance. Use general-public-license. +- `"ManageMasterUserPassword"`: A value that indicates whether to manage the master user + password with Amazon Web Services Secrets Manager. For more information, see Password + management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide. + Constraints: Can't manage the master user password with Amazon Web Services Secrets + Manager if MasterUserPassword is specified. 
- `"MasterUserPassword"`: The password for the master user. The password can include any - printable ASCII character except \"/\", \"\"\", or \"@\". Constraints: Must contain from 8 - to 41 characters. + printable ASCII character except \"/\", \"\"\", or \"@\". Constraints: Can't be specified + if ManageMasterUserPassword is turned on. MariaDB Constraints: Must contain from 8 to 41 + characters. Microsoft SQL Server Constraints: Must contain from 8 to 128 characters. + MySQL Constraints: Must contain from 8 to 41 characters. Oracle Constraints: Must + contain from 8 to 30 characters. PostgreSQL Constraints: Must contain from 8 to 128 + characters. +- `"MasterUserSecretKmsKeyId"`: The Amazon Web Services KMS key identifier to encrypt a + secret that is automatically generated and managed in Amazon Web Services Secrets Manager. + This setting is valid only if the master user password is managed by RDS in Amazon Web + Services Secrets Manager for the DB instance. The Amazon Web Services KMS key identifier is + the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a + different Amazon Web Services account, specify the key ARN or alias ARN. If you don't + specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt + the secret. If the secret is in a different Amazon Web Services account, then you can't use + the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed + KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web + Services account has a different default KMS key for each Amazon Web Services Region. - `"MasterUsername"`: The name for the master user. Constraints: Must be 1 to 16 letters or numbers. First character must be a letter. Can't be a reserved word for the chosen database engine. @@ -9513,11 +9634,10 @@ S3 bucket. This command doesn't apply to RDS Custom. 
- `kms_key_id`: The ID of the Amazon Web Services KMS key to use to encrypt the snapshot exported to Amazon S3. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. The caller of this operation must be authorized - to execute the following operations. These can be set in the Amazon Web Services KMS key - policy: GrantOperation.Encrypt GrantOperation.Decrypt GrantOperation.GenerateDataKey - GrantOperation.GenerateDataKeyWithoutPlaintext GrantOperation.ReEncryptFrom - GrantOperation.ReEncryptTo GrantOperation.CreateGrant GrantOperation.DescribeKey - GrantOperation.RetireGrant + to run the following operations. These can be set in the Amazon Web Services KMS key + policy: kms:Encrypt kms:Decrypt kms:GenerateDataKey + kms:GenerateDataKeyWithoutPlaintext kms:ReEncryptFrom kms:ReEncryptTo kms:CreateGrant + kms:DescribeKey kms:RetireGrant - `s3_bucket_name`: The name of the Amazon S3 bucket to export the snapshot to. - `source_arn`: The Amazon Resource Name (ARN) of the snapshot to export to Amazon S3. diff --git a/src/services/resource_explorer_2.jl b/src/services/resource_explorer_2.jl index 43be14594c..2fabe773cb 100644 --- a/src/services/resource_explorer_2.jl +++ b/src/services/resource_explorer_2.jl @@ -206,7 +206,10 @@ specified Amazon Web Services Region. When you delete an index, Resource Explore discovering and indexing resources in that Region. Resource Explorer also deletes all views in that Region. These actions occur as asynchronous background tasks. You can check to see when the actions are complete by using the GetIndex operation and checking the Status -response value. +response value. If the index you delete is the aggregator index for the Amazon Web +Services account, you must wait 24 hours before you can promote another local index to be +the aggregator index for the account. Users can't perform account-wide searches using +Resource Explorer until another aggregator index is configured. 
# Arguments - `arn`: The Amazon resource name (ARN) of the index that you want to delete. diff --git a/src/services/route53_recovery_control_config.jl b/src/services/route53_recovery_control_config.jl index 728e1c52e7..b79cf64d7c 100644 --- a/src/services/route53_recovery_control_config.jl +++ b/src/services/route53_recovery_control_config.jl @@ -185,7 +185,7 @@ changing routing control states, and for enabling and disabling routing controls prevent unexpected outcomes. There are two types of safety rules: assertion rules and gating rules. Assertion rule: An assertion rule enforces that, when you change a routing control state, that a certain criteria is met. For example, the criteria might be that at -least one routing control state is On after the transation so that traffic continues to +least one routing control state is On after the transaction so that traffic continues to flow to at least one cell for the application. This ensures that you avoid a fail-open scenario. Gating rule: A gating rule lets you configure a gating routing control as an overall \"on/off\" switch for a group of routing controls. Or, you can configure more diff --git a/src/services/route_53_domains.jl b/src/services/route_53_domains.jl index 0e46e7aa35..722d025da1 100644 --- a/src/services/route_53_domains.jl +++ b/src/services/route_53_domains.jl @@ -54,6 +54,56 @@ function accept_domain_transfer_from_another_aws_account( ) end +""" + associate_delegation_signer_to_domain(domain_name, signing_attributes) + associate_delegation_signer_to_domain(domain_name, signing_attributes, params::Dict{String,<:Any}) + + Creates a delegation signer (DS) record in the registry zone for this domain name. Note +that creating DS record at the registry impacts DNSSEC validation of your DNS records. This +action may render your domain name unavailable on the internet if the steps are completed +in the wrong order, or with incorrect timing. 
For more information about DNSSEC signing, +see Configuring DNSSEC signing in the Route 53 developer guide. + +# Arguments +- `domain_name`: The name of the domain. +- `signing_attributes`: The information about a key, including the algorithm, public + key-value, and flags. + +""" +function associate_delegation_signer_to_domain( + DomainName, SigningAttributes; aws_config::AbstractAWSConfig=global_aws_config() +) + return route_53_domains( + "AssociateDelegationSignerToDomain", + Dict{String,Any}( + "DomainName" => DomainName, "SigningAttributes" => SigningAttributes + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function associate_delegation_signer_to_domain( + DomainName, + SigningAttributes, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return route_53_domains( + "AssociateDelegationSignerToDomain", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "DomainName" => DomainName, "SigningAttributes" => SigningAttributes + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ cancel_domain_transfer_to_another_aws_account(domain_name) cancel_domain_transfer_to_another_aws_account(domain_name, params::Dict{String,<:Any}) @@ -351,6 +401,46 @@ function disable_domain_transfer_lock( ) end +""" + disassociate_delegation_signer_from_domain(domain_name, id) + disassociate_delegation_signer_from_domain(domain_name, id, params::Dict{String,<:Any}) + +Deletes a delegation signer (DS) record in the registry zone for this domain name. + +# Arguments +- `domain_name`: Name of the domain. +- `id`: An internal identification number assigned to each DS record after it’s created. + You can retrieve it as part of DNSSEC information returned by GetDomainDetail. 
+
+"""
+function disassociate_delegation_signer_from_domain(
+    DomainName, Id; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return route_53_domains(
+        "DisassociateDelegationSignerFromDomain",
+        Dict{String,Any}("DomainName" => DomainName, "Id" => Id);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function disassociate_delegation_signer_from_domain(
+    DomainName,
+    Id,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return route_53_domains(
+        "DisassociateDelegationSignerFromDomain",
+        Dict{String,Any}(
+            mergewith(
+                _merge, Dict{String,Any}("DomainName" => DomainName, "Id" => Id), params
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     enable_domain_auto_renew(domain_name)
     enable_domain_auto_renew(domain_name, params::Dict{String,<:Any})
@@ -653,9 +743,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
   NextPageMarker from the previous response, and submit another request that includes the
   value of NextPageMarker in the Marker element.
 - `"MaxItems"`: Number of domains to be returned. Default: 20
+- `"SortBy"`: The sort type for returned values.
+- `"SortOrder"`: The sort order for returned values, either ascending or descending.
+- `"Status"`: The status of the operations.
 - `"SubmittedSince"`: An optional parameter that lets you get information about all the
   operations that you submitted after a specified date and time. Specify the date and time
   in Unix time format and Coordinated Universal time (UTC).
+- `"Type"`: An array of the domain operation types.
 """
 function list_operations(; aws_config::AbstractAWSConfig=global_aws_config())
     return route_53_domains(
@@ -739,6 +833,47 @@ function list_tags_for_domain(
     )
 end
 
+"""
+    push_domain(domain_name, target)
+    push_domain(domain_name, target, params::Dict{String,<:Any})
+
+ Moves a domain from Amazon Web Services to another registrar. 
Supported actions: +Changes the IPS tags of a .uk domain, and pushes it to transit. Transit means that the +domain is ready to be transferred to another registrar. + +# Arguments +- `domain_name`: Name of the domain. +- `target`: New IPS tag for the domain. + +""" +function push_domain(DomainName, Target; aws_config::AbstractAWSConfig=global_aws_config()) + return route_53_domains( + "PushDomain", + Dict{String,Any}("DomainName" => DomainName, "Target" => Target); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function push_domain( + DomainName, + Target, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return route_53_domains( + "PushDomain", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("DomainName" => DomainName, "Target" => Target), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ register_domain(admin_contact, domain_name, duration_in_years, registrant_contact, tech_contact) register_domain(admin_contact, domain_name, duration_in_years, registrant_contact, tech_contact, params::Dict{String,<:Any}) @@ -749,7 +884,7 @@ For some top-level domains (TLDs), this operation requires extra parameters. Whe register a domain, Amazon Route 53 does the following: Creates a Route 53 hosted zone that has the same name as the domain. Route 53 assigns four name servers to your hosted zone and automatically updates your domain registration with the names of these name -servers. Enables autorenew, so your domain registration will renew automatically each +servers. Enables auto renew, so your domain registration will renew automatically each year. We'll notify you in advance of the renewal date so you can choose whether to renew the registration. Optionally enables privacy protection, so WHOIS queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our @@ -787,7 +922,7 @@ see Amazon Route 53 Pricing. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AutoRenew"`: Indicates whether the domain will be automatically renewed (true) or not - (false). Autorenewal only takes effect after the account is charged. Default: true + (false). Auto renewal only takes effect after the account is charged. Default: true - `"IdnLangCode"`: Reserved for future use. - `"PrivacyProtectAdminContact"`: Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information @@ -989,12 +1124,47 @@ function resend_contact_reachability_email( ) end +""" + resend_operation_authorization(operation_id) + resend_operation_authorization(operation_id, params::Dict{String,<:Any}) + + Resend the form of authorization email for this operation. + +# Arguments +- `operation_id`: Operation ID. + +""" +function resend_operation_authorization( + OperationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return route_53_domains( + "ResendOperationAuthorization", + Dict{String,Any}("OperationId" => OperationId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function resend_operation_authorization( + OperationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return route_53_domains( + "ResendOperationAuthorization", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("OperationId" => OperationId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ retrieve_domain_auth_code(domain_name) retrieve_domain_auth_code(domain_name, params::Dict{String,<:Any}) -This operation returns the AuthCode for the domain. To transfer a domain to another -registrar, you provide this value to the new registrar. +This operation returns the authorization code for the domain. To transfer a domain to +another registrar, you provide this value to the new registrar. 
# Arguments - `domain_name`: The name of the domain that you want to get an authorization code for. @@ -1071,7 +1241,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"AuthCode"`: The authorization code for the domain. You get this value from the current registrar. - `"AutoRenew"`: Indicates whether the domain will be automatically renewed (true) or not - (false). Autorenewal only takes effect after the account is charged. Default: true + (false). Auto renewal only takes effect after the account is charged. Default: true - `"IdnLangCode"`: Reserved for future use. - `"Nameservers"`: Contains details for the host and glue IP addresses. - `"PrivacyProtectAdminContact"`: Whether you want to conceal contact information from @@ -1208,7 +1378,7 @@ end This operation updates the contact information for a particular domain. You must specify information for at least one contact: registrant, administrator, or technical. If the update is successful, this method returns an operation ID that you can use to track the -progress and completion of the action. If the request is not completed successfully, the +progress and completion of the operation. If the request is not completed successfully, the domain registrant will be notified by email. # Arguments @@ -1217,6 +1387,7 @@ domain registrant will be notified by email. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"AdminContact"`: Provides detailed contact information. +- `"Consent"`: Customer's consent for the owner change request. - `"RegistrantContact"`: Provides detailed contact information. - `"TechContact"`: Provides detailed contact information. 
""" diff --git a/src/services/sagemaker.jl b/src/services/sagemaker.jl index 96c50d9297..1470549baf 100644 --- a/src/services/sagemaker.jl +++ b/src/services/sagemaker.jl @@ -1416,21 +1416,22 @@ end create_experiment(experiment_name) create_experiment(experiment_name, params::Dict{String,<:Any}) -Creates an SageMaker experiment. An experiment is a collection of trials that are observed, +Creates a SageMaker experiment. An experiment is a collection of trials that are observed, compared and evaluated as a group. A trial is a set of steps, called trial components, that -produce a machine learning model. The goal of an experiment is to determine the components -that produce the best model. Multiple trials are performed, each one isolating and -measuring the impact of a change to one or more inputs, while keeping the remaining inputs -constant. When you use SageMaker Studio or the SageMaker Python SDK, all experiments, -trials, and trial components are automatically tracked, logged, and indexed. When you use -the Amazon Web Services SDK for Python (Boto), you must use the logging APIs provided by -the SDK. You can add tags to experiments, trials, trial components and then use the Search -API to search for the tags. To add a description to an experiment, specify the optional -Description parameter. To add a description later, or to change the description, call the -UpdateExperiment API. To get a list of all your experiments, call the ListExperiments API. -To view an experiment's properties, call the DescribeExperiment API. To get a list of all -the trials associated with an experiment, call the ListTrials API. To create a trial call -the CreateTrial API. +produce a machine learning model. In the Studio UI, trials are referred to as run groups +and trial components are referred to as runs. The goal of an experiment is to determine +the components that produce the best model. 
Multiple trials are performed, each one +isolating and measuring the impact of a change to one or more inputs, while keeping the +remaining inputs constant. When you use SageMaker Studio or the SageMaker Python SDK, all +experiments, trials, and trial components are automatically tracked, logged, and indexed. +When you use the Amazon Web Services SDK for Python (Boto), you must use the logging APIs +provided by the SDK. You can add tags to experiments, trials, trial components and then use +the Search API to search for the tags. To add a description to an experiment, specify the +optional Description parameter. To add a description later, or to change the description, +call the UpdateExperiment API. To get a list of all your experiments, call the +ListExperiments API. To view an experiment's properties, call the DescribeExperiment API. +To get a list of all the trials associated with an experiment, call the ListTrials API. To +create a trial call the CreateTrial API. # Arguments - `experiment_name`: The name of the experiment. The name must be unique in your Amazon Web @@ -1840,8 +1841,8 @@ more information, see Bring your own SageMaker image. # Arguments - `image_name`: The name of the image. Must be unique to your account. -- `role_arn`: The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker - to perform tasks on your behalf. +- `role_arn`: The ARN of an IAM role that enables Amazon SageMaker to perform tasks on your + behalf. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1893,6 +1894,25 @@ Amazon Elastic Container Registry (ECR) container image specified by BaseImage. Services SDKs, such as the SDK for Python (Boto3), add a unique value to the call. - `image_name`: The ImageName of the Image to create a version of. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"Aliases"`: A list of aliases created with the image version. +- `"Horovod"`: Indicates Horovod compatibility. +- `"JobType"`: Indicates SageMaker job type compatibility. TRAINING: The image version + is compatible with SageMaker training jobs. INFERENCE: The image version is compatible + with SageMaker inference jobs. NOTEBOOK_KERNEL: The image version is compatible with + SageMaker notebook kernels. +- `"MLFramework"`: The machine learning framework vended in the image version. +- `"Processor"`: Indicates CPU or GPU compatibility. CPU: The image version is + compatible with CPU. GPU: The image version is compatible with GPU. +- `"ProgrammingLang"`: The supported programming language and its version. +- `"ReleaseNotes"`: The maintainer description of the image version. +- `"VendorGuidance"`: The stability of the image version, specified by the maintainer. + NOT_PROVIDED: The maintainers did not provide a status for image version stability. + STABLE: The image version is stable. TO_BE_ARCHIVED: The image version is set to be + archived. Custom image versions that are set to be archived are automatically archived + after three months. ARCHIVED: The image version is archived. Archived image versions are + not searchable and are no longer actively supported. """ function create_image_version( BaseImage, ClientToken, ImageName; aws_config::AbstractAWSConfig=global_aws_config() @@ -4993,41 +5013,37 @@ function delete_image( end """ - delete_image_version(image_name, version) - delete_image_version(image_name, version, params::Dict{String,<:Any}) + delete_image_version(image_name) + delete_image_version(image_name, params::Dict{String,<:Any}) Deletes a version of a SageMaker image. The container image the version represents isn't deleted. # Arguments -- `image_name`: The name of the image. -- `version`: The version to delete. +- `image_name`: The name of the image to delete. 
+# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Alias"`: The alias of the image to delete. +- `"Version"`: The version to delete. """ -function delete_image_version( - ImageName, Version; aws_config::AbstractAWSConfig=global_aws_config() -) +function delete_image_version(ImageName; aws_config::AbstractAWSConfig=global_aws_config()) return sagemaker( "DeleteImageVersion", - Dict{String,Any}("ImageName" => ImageName, "Version" => Version); + Dict{String,Any}("ImageName" => ImageName); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function delete_image_version( ImageName, - Version, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return sagemaker( "DeleteImageVersion", Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}("ImageName" => ImageName, "Version" => Version), - params, - ), + mergewith(_merge, Dict{String,Any}("ImageName" => ImageName), params) ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -6946,6 +6962,7 @@ Describes a version of a SageMaker image. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Alias"`: The alias of the image version. - `"Version"`: The version of the image. If not specified, the latest version is described. """ function describe_image_version( @@ -8537,6 +8554,47 @@ function list_algorithms( ) end +""" + list_aliases(image_name) + list_aliases(image_name, params::Dict{String,<:Any}) + +Lists the aliases of a specified image or image version. + +# Arguments +- `image_name`: The name of the image. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Alias"`: The alias of the image version. +- `"MaxResults"`: The maximum number of aliases to return. 
+- `"NextToken"`: If the previous call to ListAliases didn't return the full set of aliases, + the call returns a token for retrieving the next set of aliases. +- `"Version"`: The version of the image. If image version is not specified, the aliases of + all versions of the image are listed. +""" +function list_aliases(ImageName; aws_config::AbstractAWSConfig=global_aws_config()) + return sagemaker( + "ListAliases", + Dict{String,Any}("ImageName" => ImageName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_aliases( + ImageName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "ListAliases", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ImageName" => ImageName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_app_image_configs() list_app_image_configs(params::Dict{String,<:Any}) @@ -12758,6 +12816,10 @@ Updates the default settings for new user profiles in the domain. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AppSecurityGroupManagement"`: The entity that creates and manages the required security + groups for inter-app communication in VPCOnly mode. Required when + CreateDomain.AppNetworkAccessType is VPCOnly and + DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is provided. - `"DefaultSpaceSettings"`: The default settings used to create a space within the Domain. - `"DefaultUserSettings"`: A collection of settings. - `"DomainSettingsForUpdate"`: A collection of DomainSettings configuration values to @@ -13098,8 +13160,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys DisplayName properties can be deleted. - `"Description"`: The new description for the image. - `"DisplayName"`: The new display name for the image. 
-- `"RoleArn"`: The new Amazon Resource Name (ARN) for the IAM role that enables Amazon - SageMaker to perform tasks on your behalf. +- `"RoleArn"`: The new ARN for the IAM role that enables Amazon SageMaker to perform tasks + on your behalf. """ function update_image(ImageName; aws_config::AbstractAWSConfig=global_aws_config()) return sagemaker( @@ -13124,6 +13186,61 @@ function update_image( ) end +""" + update_image_version(image_name) + update_image_version(image_name, params::Dict{String,<:Any}) + +Updates the properties of a SageMaker image version. + +# Arguments +- `image_name`: The name of the image. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Alias"`: The alias of the image version. +- `"AliasesToAdd"`: A list of aliases to add. +- `"AliasesToDelete"`: A list of aliases to delete. +- `"Horovod"`: Indicates Horovod compatibility. +- `"JobType"`: Indicates SageMaker job type compatibility. TRAINING: The image version + is compatible with SageMaker training jobs. INFERENCE: The image version is compatible + with SageMaker inference jobs. NOTEBOOK_KERNEL: The image version is compatible with + SageMaker notebook kernels. +- `"MLFramework"`: The machine learning framework vended in the image version. +- `"Processor"`: Indicates CPU or GPU compatibility. CPU: The image version is + compatible with CPU. GPU: The image version is compatible with GPU. +- `"ProgrammingLang"`: The supported programming language and its version. +- `"ReleaseNotes"`: The maintainer description of the image version. +- `"VendorGuidance"`: The availability of the image version specified by the maintainer. + NOT_PROVIDED: The maintainers did not provide a status for image version stability. + STABLE: The image version is stable. TO_BE_ARCHIVED: The image version is set to be + archived. Custom image versions that are set to be archived are automatically archived + after three months. 
ARCHIVED: The image version is archived. Archived image versions are + not searchable and are no longer actively supported. +- `"Version"`: The version of the image. +""" +function update_image_version(ImageName; aws_config::AbstractAWSConfig=global_aws_config()) + return sagemaker( + "UpdateImageVersion", + Dict{String,Any}("ImageName" => ImageName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_image_version( + ImageName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "UpdateImageVersion", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ImageName" => ImageName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_inference_experiment(name) update_inference_experiment(name, params::Dict{String,<:Any}) diff --git a/src/services/secrets_manager.jl b/src/services/secrets_manager.jl index 7dd9247d86..774184219c 100644 --- a/src/services/secrets_manager.jl +++ b/src/services/secrets_manager.jl @@ -555,6 +555,7 @@ Manager and Authentication and access control in Secrets Manager. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Filters"`: The filters to apply to the list of secrets. +- `"IncludePlannedDeletion"`: - `"MaxResults"`: The number of results to include in the response. If there are more results available, in the response, Secrets Manager includes NextToken. To get the next results, call ListSecrets again with the value from NextToken. diff --git a/src/services/securitylake.jl b/src/services/securitylake.jl index e4b84a97bd..35aae2e21e 100644 --- a/src/services/securitylake.jl +++ b/src/services/securitylake.jl @@ -8,29 +8,32 @@ using AWS.UUIDs create_aws_log_source(input_order) create_aws_log_source(input_order, params::Dict{String,<:Any}) -Adds a natively-supported Amazon Web Services service as a Security Lake source. 
Enables -source types for member accounts in required Regions, based on specified parameters. You -can choose any source type in any Region for accounts that are either part of a trusted -organization or standalone accounts. At least one of the three dimensions is a mandatory -input to this API. However, any combination of the three dimensions can be supplied to this -API. By default, dimension refers to the entire set. When you don't provide a dimension, -Security Lake assumes that the missing dimension refers to the entire set. This is -overridden when you supply any one of the inputs. For instance, when members is not -specified, the API disables all Security Lake member accounts for sources. Similarly, when -Regions are not specified, Security Lake is disabled for all the Regions where Security -Lake is available as a service. You can use this API only to enable a natively-supported -Amazon Web Services services as a source. Use CreateCustomLogSource to enable data -collection from a custom source. +Adds a natively supported Amazon Web Service as an Amazon Security Lake source. Enables +source types for member accounts in required Amazon Web Services Regions, based on the +parameters you specify. You can choose any source type in any Region for either accounts +that are part of a trusted organization or standalone accounts. At least one of the three +dimensions is a mandatory input to this API. However, you can supply any combination of the +three dimensions to this API. By default, a dimension refers to the entire set. When you +don't provide a dimension, Security Lake assumes that the missing dimension refers to the +entire set. This is overridden when you supply any one of the inputs. For instance, when +you do not specify members, the API enables all Security Lake member accounts for all +sources. Similarly, when you do not specify Regions, Security Lake is enabled for all the +Regions where Security Lake is available as a service. 
You can use this API only to enable +natively supported Amazon Web Services as a source. Use CreateCustomLogSource to enable +data collection from a custom source. # Arguments - `input_order`: Specifies the input order to enable dimensions in Security Lake, namely - region, source type, and member account. + Region, source type, and member account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"enableAllDimensions"`: Enables specific sources in all Regions and source types. -- `"enableSingleDimension"`: Enables all sources in specific accounts or Regions. -- `"enableTwoDimensions"`: Enables specific service sources in specific accounts or Regions. +- `"enableAllDimensions"`: Enables data collection from specific Amazon Web Services + sources in all specific accounts and specific Regions. +- `"enableSingleDimension"`: Enables data collection from all Amazon Web Services sources + in specific accounts or Regions. +- `"enableTwoDimensions"`: Enables data collection from specific Amazon Web Services + sources in specific accounts or Regions. """ function create_aws_log_source( inputOrder; aws_config::AbstractAWSConfig=global_aws_config() @@ -63,21 +66,24 @@ end create_custom_log_source(custom_source_name, event_class, glue_invocation_role_arn, log_provider_account_id) create_custom_log_source(custom_source_name, event_class, glue_invocation_role_arn, log_provider_account_id, params::Dict{String,<:Any}) -Adds a third-party custom source in Amazon Security Lake, from the Region where you want to -create a custom source. Security Lake can collect logs and events from third-party custom -sources. After creating the appropriate API roles, use this API to add a custom source name -in Security Lake. This operation creates a partition in the Security Lake S3 bucket as the -target location for log files from the custom source, an associated Glue table, and an Glue -crawler. 
+Adds a third-party custom source in Amazon Security Lake, from the Amazon Web Services
+Region where you want to create a custom source. Security Lake can collect logs and events
+from third-party custom sources. After creating the appropriate IAM role to invoke Glue
+crawler, use this API to add a custom source name in Security Lake. This operation creates
+a partition in the Amazon S3 bucket for Security Lake as the target location for log files
+from the custom source in addition to an associated Glue table and a Glue crawler.
 
 # Arguments
-- `custom_source_name`: The custom source name for a third-party custom source.
-- `event_class`: The Open Cybersecurity Schema Framework (OCSF) event class.
-- `glue_invocation_role_arn`: The IAM Role ARN to be used by the Glue Crawler. The
-  recommended IAM policies are: The managed policy AWSGlueServiceRole A custom policy
-  granting access to your S3 Data Lake
-- `log_provider_account_id`: The Account ID that will assume the above Role to put logs
-  into the Data Lake.
+- `custom_source_name`: The name for a third-party custom source. This must be a Regionally
+  unique value.
+- `event_class`: The Open Cybersecurity Schema Framework (OCSF) event class which describes
+  the type of data that the custom source will send to Security Lake.
+- `glue_invocation_role_arn`: The Amazon Resource Name (ARN) of the Identity and Access
+  Management (IAM) role to be used by the Glue crawler. The recommended IAM policies are:
+  The managed policy AWSGlueServiceRole A custom policy granting access to your Amazon S3
+  Data Lake
+- `log_provider_account_id`: The Amazon Web Services account ID of the custom source that
+  will write logs and events into the Amazon S3 Data Lake.
 """
 function create_custom_log_source(
@@ -133,31 +139,33 @@ end
     create_datalake(params::Dict{String,<:Any})
 
 Initializes an Amazon Security Lake instance with the provided (or default) configuration. 
-You can enable Security Lake in Regions with customized settings in advance before enabling -log collection in Regions. You can either use the enableAll parameter to specify all -Regions or you can specify the Regions you want to enable Security Lake using the Regions -parameter and configure these Regions using the configurations parameter. When the -CreateDataLake API is called multiple times, if that Region is already enabled, it will -update the Region if configuration for that Region is provided. If that Region is a new -Region, it will be set up with the customized configurations if it is specified. When you +You can enable Security Lake in Amazon Web Services Regions with customized settings before +enabling log collection in Regions. You can either use the enableAll parameter to specify +all Regions or specify the Regions where you want to enable Security Lake. To specify +particular Regions, use the Regions parameter and then configure these Regions using the +configurations parameter. If you have already enabled Security Lake in a Region when you +call this command, the command will update the Region if you provide new configuration +parameters. If you have not already enabled Security Lake in the Region when you call this +API, it will set up the data lake in the Region with the specified configurations. When you enable Security Lake, it starts ingesting security data after the CreateAwsLogSource call. This includes ingesting security data from sources, storing data, and making data accessible to subscribers. Security Lake also enables all the existing settings and -resources that it stores or maintains for your account in the current Region, including -security log and event data. For more information, see the Amazon Security Lake User Guide. +resources that it stores or maintains for your Amazon Web Services account in the current +Region, including security log and event data. For more information, see the Amazon +Security Lake User Guide. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"configurations"`: Enable Security Lake with the specified configurations settings to - begin ingesting security data. -- `"enableAll"`: Enable Security Lake in all Regions to begin ingesting security data. -- `"metaStoreManagerRoleArn"`: The Role ARN used to create and update the Glue table with - partitions generated by ingestion and normalization of Amazon Web Services log sources and - custom sources. -- `"regions"`: Enable Security Lake in the specified Regions to begin ingesting security - data. To enable Security Lake in specific Amazon Web Services Regions, such as us-east-1 or - ap-northeast-3, provide the Region codes. For a list of Region codes, see Region codes in - the Amazon Web Services General Reference. +- `"configurations"`: Specify the Region or Regions that will contribute data to the rollup + region. +- `"enableAll"`: Enable Security Lake in all Regions. +- `"metaStoreManagerRoleArn"`: The Amazon Resource Name (ARN) used to create and update the + Glue table. This table contains partitions generated by the ingestion and normalization of + Amazon Web Services log sources and custom sources. +- `"regions"`: Enable Security Lake in the specified Regions. To enable Security Lake in + specific Amazon Web Services Regions, such as us-east-1 or ap-northeast-3, provide the + Region codes. For a list of Region codes, see Amazon Security Lake endpoints in the Amazon + Web Services General Reference. """ function create_datalake(; aws_config::AbstractAWSConfig=global_aws_config()) return securitylake( @@ -180,14 +188,13 @@ end create_datalake_auto_enable(configuration_for_new_accounts) create_datalake_auto_enable(configuration_for_new_accounts, params::Dict{String,<:Any}) -Automatically enable Security Lake in the specified Regions to begin ingesting security -data. 
When you choose to enable organization accounts automatically, then Security Lake -begins to enable new accounts as member accounts as they are added to the organization. -Security Lake does not enable existing organization accounts that are not yet enabled. +Automatically enables Amazon Security Lake for new member accounts in your organization. +Security Lake is not automatically enabled for any existing member accounts in your +organization. # Arguments -- `configuration_for_new_accounts`: Enable Amazon Security Lake with the specified - configurations settings to begin ingesting security data for new accounts in Security Lake. +- `configuration_for_new_accounts`: Enable Security Lake with the specified configuration + settings to begin collecting security data for new accounts in your organization. """ function create_datalake_auto_enable( @@ -227,12 +234,13 @@ end create_datalake_delegated_admin(account) create_datalake_delegated_admin(account, params::Dict{String,<:Any}) -Designates the Security Lake administrator account for the organization. This API can only -be called by the organization management account. The organization management account -cannot be the delegated administrator account. +Designates the Amazon Security Lake delegated administrator account for the organization. +This API can only be called by the organization management account. The organization +management account cannot be the delegated administrator account. # Arguments -- `account`: Account ID of the Security Lake delegated administrator. +- `account`: The Amazon Web Services account ID of the Security Lake delegated + administrator. """ function create_datalake_delegated_admin( @@ -262,13 +270,13 @@ end create_datalake_exceptions_subscription(notification_endpoint, subscription_protocol) create_datalake_exceptions_subscription(notification_endpoint, subscription_protocol, params::Dict{String,<:Any}) -Creates the specified notification subscription in Security Lake. 
Creates the specified -subscription notifications in the specified organization. +Creates the specified notification subscription in Amazon Security Lake for the +organization you specify. # Arguments -- `notification_endpoint`: The account in which the exception notifications subscription is - created. -- `subscription_protocol`: The subscription protocol to which exception messages are +- `notification_endpoint`: The Amazon Web Services account where you want to receive + exception notifications. +- `subscription_protocol`: The subscription protocol to which exception notifications are posted. """ @@ -316,24 +324,24 @@ end create_subscriber(account_id, external_id, source_types, subscriber_name) create_subscriber(account_id, external_id, source_types, subscriber_name, params::Dict{String,<:Any}) -Creates a subscription permission for accounts that are already enabled in Security Lake. +Creates a subscription permission for accounts that are already enabled in Amazon Security +Lake. You can create a subscriber with access to data in the current Amazon Web Services +Region. # Arguments -- `account_id`: The third party Amazon Web Services account ID used to access your data. -- `external_id`: The external ID of the subscriber. External ID allows the user that is - assuming the role to assert the circumstances in which they are operating. It also provides - a way for the account owner to permit the role to be assumed only under specific - circumstances. -- `source_types`: The supported Amazon Web Services services from which logs and events are - collected. Amazon Security Lake supports logs and events collection for natively-supported - Amazon Web Services services. -- `subscriber_name`: The name of your Amazon Security Lake subscriber account. +- `account_id`: The Amazon Web Services account ID used to access your data. +- `external_id`: The external ID of the subscriber. 
This lets the user that is assuming the
+  role assert the circumstances in which they are operating. It also provides a way for the
+  account owner to permit the role to be assumed only under specific circumstances.
+- `source_types`: The supported Amazon Web Services from which logs and events are
+  collected. Security Lake supports log and event collection for natively supported Amazon
+  Web Services.
+- `subscriber_name`: The name of your Security Lake subscriber account.
 
 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
 - `"accessTypes"`: The Amazon S3 or Lake Formation access type.
-- `"subscriberDescription"`: The subscriber descriptions for the subscriber account in
-  Amazon Security Lake.
+- `"subscriberDescription"`: The description for your subscriber account in Security Lake.
 """
 function create_subscriber(
     accountId,
@@ -387,22 +395,22 @@ end
     create_subscription_notification_configuration(subscription_id)
     create_subscription_notification_configuration(subscription_id, params::Dict{String,<:Any})
 
-Creates the specified notification subscription in Security Lake. Creates the specified
-subscription notifications from the specified organization.
+Notifies the subscriber when new data is written to the data lake for the sources that the
+subscriber consumes in Security Lake.
 
 # Arguments
-- `subscription_id`: The subscription ID for which the subscription notification is
-  specified.
+- `subscription_id`: The subscription ID for the notification subscription.
 
 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
-- `"createSqs"`: Create a new subscription notification for the specified subscription ID
-  in Security Lake.
-- `"httpsApiKeyName"`: The key name for the subscription notification.
-- `"httpsApiKeyValue"`: The key value for the subscription notification.
-- `"httpsMethod"`: The HTTPS method used for the subscription notification. 
-- `"roleArn"`: The Amazon Resource Name (ARN) specifying the role of the subscriber. -- `"subscriptionEndpoint"`: The subscription endpoint in Security Lake. +- `"createSqs"`: Create an Amazon Simple Queue Service queue. +- `"httpsApiKeyName"`: The key name for the notification subscription. +- `"httpsApiKeyValue"`: The key value for the notification subscription. +- `"httpsMethod"`: The HTTPS method used for the notification subscription. +- `"roleArn"`: The Amazon Resource Name (ARN) of the EventBridge API destinations IAM role + that you created. +- `"subscriptionEndpoint"`: The subscription endpoint in Security Lake. If you prefer + notification with an HTTPs endpoint, populate this field. """ function create_subscription_notification_configuration( subscriptionId; aws_config::AbstractAWSConfig=global_aws_config() @@ -432,31 +440,30 @@ end delete_aws_log_source(input_order) delete_aws_log_source(input_order, params::Dict{String,<:Any}) -Removes a natively-supported Amazon Web Services service as a Amazon Security Lake source. -When you remove the source, Security Lake stops collecting data from that source, and -subscribers can no longer consume new data from the source. Subscribers can still consume -data that Amazon Security Lake collected from the source before disablement. You can choose -any source type in any Region for accounts that are either part of a trusted organization -or standalone accounts. At least one of the three dimensions is a mandatory input to this -API. However, any combination of the three dimensions can be supplied to this API. By -default, dimension refers to the entire set. This is overridden when you supply any one of -the inputs. For instance, when members is not specified, the API disables all Security Lake -member accounts for sources. Similarly, when Regions are not specified, Security Lake is -disabled for all the Regions where Security Lake is available as a service. 
You can use -this API to remove a natively-supported Amazon Web Services service as a source. Use -DeregisterCustomData to remove a custom source. When you don't provide a dimension, -Security Lake assumes that the missing dimension refers to the entire set. For example, if -you don't provide specific accounts, the API applies to the entire set of accounts in your -organization. +Removes a natively supported Amazon Web Service as an Amazon Security Lake source. When you +remove the source, Security Lake stops collecting data from that source, and subscribers +can no longer consume new data from the source. Subscribers can still consume data that +Security Lake collected from the source before disablement. You can choose any source type +in any Amazon Web Services Region for either accounts that are part of a trusted +organization or standalone accounts. At least one of the three dimensions is a mandatory +input to this API. However, you can supply any combination of the three dimensions to this +API. By default, a dimension refers to the entire set. This is overridden when you supply +any one of the inputs. For instance, when you do not specify members, the API disables all +Security Lake member accounts for sources. Similarly, when you do not specify Regions, +Security Lake is disabled for all the Regions where Security Lake is available as a +service. When you don't provide a dimension, Security Lake assumes that the missing +dimension refers to the entire set. For example, if you don't provide specific accounts, +the API applies to the entire set of accounts in your organization. # Arguments -- `input_order`: This is a mandatory input. Specifies the input order to disable dimensions - in Security Lake, namely Region, source type, and member. +- `input_order`: This is a mandatory input. 
Specify the input order to disable dimensions
+  in Security Lake, namely Region (Amazon Web Services Region code), source type, and member
+  (account ID of a specific Amazon Web Services account).
 
 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
-- `"disableAllDimensions"`: Removes the specific Amazon Web Services sources from all
-  Regions and source types.
+- `"disableAllDimensions"`: Removes the specific Amazon Web Services sources from specific
+  accounts and specific Regions.
 - `"disableSingleDimension"`: Removes all Amazon Web Services sources from specific
   accounts or Regions.
 - `"disableTwoDimensions"`: Remove a specific Amazon Web Services source from specific
@@ -493,10 +500,10 @@ end
     delete_custom_log_source(custom_source_name)
     delete_custom_log_source(custom_source_name, params::Dict{String,<:Any})
 
-Removes a custom log source from Security Lake.
+Removes a custom log source from Amazon Security Lake.
 
 # Arguments
-- `custom_source_name`: The custom source name for the custome log source.
+- `custom_source_name`: The custom source name for the custom log source.
 """
 function delete_custom_log_source(
@@ -533,13 +540,13 @@ end
     delete_datalake(params::Dict{String,<:Any})
 
 When you delete Amazon Security Lake from your account, Security Lake is disabled in all
-Regions. Also, this API automatically performs the off-boarding steps to off-board the
-account from Security Lake . This includes ingesting security data from sources, storing
-data, and making data accessible to subscribers. Security Lake also deletes all the
-existing settings and resources that it stores or maintains for your account in the current
-Region, including security log and event data. DeleteDatalake does not delete the S3 bucket
-which is owned by the Amazon Web Services account. For more information, see the Amazon
-Security Lake User Guide.
+Amazon Web Services Regions. 
Also, this API automatically takes steps to remove the account +from Security Lake . This operation disables security data collection from sources, +deletes data stored, and stops making data accessible to subscribers. Security Lake also +deletes all the existing settings and resources that it stores or maintains for your Amazon +Web Services account in the current Region, including security log and event data. The +DeleteDatalake operation does not delete the Amazon S3 bucket, which is owned by your +Amazon Web Services account. For more information, see the Amazon Security Lake User Guide. """ function delete_datalake(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -563,18 +570,19 @@ end delete_datalake_auto_enable(remove_from_configuration_for_new_accounts) delete_datalake_auto_enable(remove_from_configuration_for_new_accounts, params::Dict{String,<:Any}) -Automatically delete Security Lake in the specified Regions to stop ingesting security -data. When you delete Amazon Security Lake from your account, Security Lake is disabled in -all Regions. Also, this API automatically performs the off-boarding steps to off-board the -account from Security Lake . This includes ingesting security data from sources, storing -data, and making data accessible to subscribers. Security Lake also deletes all the -existing settings and resources that it stores or maintains for your account in the current -Region, including security log and event data. For more information, see the Amazon -Security Lake User Guide. +Automatically deletes Amazon Security Lake to stop collecting security data. When you +delete Amazon Security Lake from your account, Security Lake is disabled in all Regions. +Also, this API automatically takes steps to remove the account from Security Lake . This +operation disables security data collection from sources, deletes data stored, and stops +making data accessible to subscribers. 
Security Lake also deletes all the existing settings +and resources that it stores or maintains for your Amazon Web Services account in the +current Region, including security log and event data. The DeleteDatalake operation does +not delete the Amazon S3 bucket, which is owned by your Amazon Web Services account. For +more information, see the Amazon Security Lake User Guide. # Arguments - `remove_from_configuration_for_new_accounts`: Delete Amazon Security Lake with the - specified configurations settings to stop ingesting security data for new accounts in + specified configuration settings to stop ingesting security data for new accounts in Security Lake. """ @@ -618,12 +626,12 @@ end delete_datalake_delegated_admin(account) delete_datalake_delegated_admin(account, params::Dict{String,<:Any}) -Deletes the Security Lake administrator account for the organization. This API can only be -called by the organization management account. The organization management account cannot -be the delegated administrator account. +Deletes the Amazon Security Lake delegated administrator account for the organization. This +API can only be called by the organization management account. The organization management +account cannot be the delegated administrator account. # Arguments -- `account`: Account ID the Security Lake delegated administrator. +- `account`: The account ID the Security Lake delegated administrator. """ function delete_datalake_delegated_admin( @@ -652,8 +660,8 @@ end delete_datalake_exceptions_subscription() delete_datalake_exceptions_subscription(params::Dict{String,<:Any}) -Deletes the specified notification subscription in Security Lake. Deletes the specified -subscription notifications in the specified organization. +Deletes the specified notification subscription in Amazon Security Lake for the +organization you specify. 
""" function delete_datalake_exceptions_subscription(; @@ -682,8 +690,9 @@ end delete_subscriber(id) delete_subscriber(id, params::Dict{String,<:Any}) -Deletes the specified subscription permissions to Security Lake. Deletes the specified -subscription permissions from the specified organization. +Deletes the subscription permission for accounts that are already enabled in Amazon +Security Lake. You can delete a subscriber and remove access to data in the current Amazon +Web Services Region. # Arguments - `id`: A value created by Security Lake that uniquely identifies your DeleteSubscriber API @@ -715,11 +724,11 @@ end delete_subscription_notification_configuration(subscription_id) delete_subscription_notification_configuration(subscription_id, params::Dict{String,<:Any}) -Deletes the specified notification subscription in Security Lake. Deletes the specified -subscription notifications from the specified organization. +Deletes the specified notification subscription in Amazon Security Lake for the +organization you specify. # Arguments -- `subscription_id`: The subscription ID of the Amazon Security Lake subscriber account. +- `subscription_id`: The ID of the Security Lake subscriber account. """ function delete_subscription_notification_configuration( @@ -750,8 +759,9 @@ end get_datalake() get_datalake(params::Dict{String,<:Any}) -Retrieve the Security Lake configuration object for the specified account ID. This API does -not take input parameters. +Retrieves the Amazon Security Lake configuration object for the specified Amazon Web +Services account ID. You can use the GetDatalake API to know whether Security Lake is +enabled for the current Region. This API does not take input parameters. 
""" function get_datalake(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -776,7 +786,7 @@ end get_datalake_auto_enable(params::Dict{String,<:Any}) Retrieves the configuration that will be automatically set up for accounts added to the -organization after the organization has on boarded to Amazon Security Lake. This API does +organization after the organization has onboarded to Amazon Security Lake. This API does not take input parameters. """ @@ -805,9 +815,8 @@ end get_datalake_exceptions_expiry(params::Dict{String,<:Any}) Retrieves the expiration period and time-to-live (TTL) for which the exception message will -remain. Exceptions are stored by default, for a 2 week period of time from when a record -was created in Security Lake. This API does not take input parameters. This API does not -take input parameters. +remain. Exceptions are stored by default, for 2 weeks from when a record was created in +Amazon Security Lake. This API does not take input parameters. """ function get_datalake_exceptions_expiry(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -863,20 +872,20 @@ end get_datalake_status() get_datalake_status(params::Dict{String,<:Any}) -Retrieve the Security Lake configuration object for the specified account ID. This API does -not take input parameters. +Retrieves a snapshot of the current Region, including whether Amazon Security Lake is +enabled for those accounts and which sources Security Lake is collecting data from. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"accountSet"`: The account IDs for which a static snapshot of the current Region, - including enabled accounts and log sources is retrieved. +- `"accountSet"`: The Amazon Web Services account ID for which a static snapshot of the + current Amazon Web Services Region, including enabled accounts and log sources, is + retrieved. 
- `"maxAccountResults"`: The maximum limit of accounts for which the static snapshot of the - current Region including enabled accounts and log sources is retrieved. -- `"nextToken"`: If nextToken is returned, there are more results available. The value of - nextToken is a unique pagination token for each page. Make the call again using the - returned token to retrieve the next page. Keep all other arguments unchanged. Each - pagination token expires after 24 hours. Using an expired pagination token will return an - HTTP 400 InvalidToken error. + current Region, including enabled accounts and log sources, is retrieved. +- `"nextToken"`: Lists if there are more results available. The value of nextToken is a + unique pagination token for each page. Repeat the call using the returned token to retrieve + the next page. Keep all other arguments unchanged. Each pagination token expires after 24 + hours. Using an expired pagination token will return an HTTP 400 InvalidToken error. """ function get_datalake_status(; aws_config::AbstractAWSConfig=global_aws_config()) return securitylake( @@ -902,11 +911,12 @@ end get_subscriber(id) get_subscriber(id, params::Dict{String,<:Any}) -Retrieves subscription information for the specified subscription ID. +Retrieves the subscription information for the specified subscription ID. You can get +information about a specific subscriber. # Arguments -- `id`: A value created by Security Lake that uniquely identifies your GetSubscriber API - request. +- `id`: A value created by Amazon Security Lake that uniquely identifies your GetSubscriber + API request. """ function get_subscriber(id; aws_config::AbstractAWSConfig=global_aws_config()) @@ -933,15 +943,17 @@ end list_datalake_exceptions() list_datalake_exceptions(params::Dict{String,<:Any}) -List the Amazon Security Lake exceptions that you can use to find the source of problems +Lists the Amazon Security Lake exceptions that you can use to find the source of problems and fix them. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxFailures"`: List the maximum number of failures in Security Lake. -- `"nextToken"`: List if there are more results available. if nextToken is returned, You - can make the call again using the returned token to retrieve the next page -- `"regionSet"`: List the regions from which exceptions are retrieved. +- `"nextToken"`: List if there are more results available. The value of nextToken is a + unique pagination token for each page. Repeat the call using the returned token to retrieve + the next page. Keep all other arguments unchanged. Each pagination token expires after 24 + hours. Using an expired pagination token will return an HTTP 400 InvalidToken error. +- `"regionSet"`: List the Amazon Web Services Regions from which exceptions are retrieved. """ function list_datalake_exceptions(; aws_config::AbstractAWSConfig=global_aws_config()) return securitylake( @@ -967,21 +979,22 @@ end list_log_sources() list_log_sources(params::Dict{String,<:Any}) -Lists the log sources in the current region. +Retrieves the log sources in the current Amazon Web Services Region. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"inputOrder"`: Lists the log sources in input order, namely Region, source type, and member account. -- `"listAllDimensions"`: List the view of log sources for enabled Security Lake accounts in - all Regions and source types. +- `"listAllDimensions"`: List the view of log sources for enabled Amazon Security Lake + accounts for specific Amazon Web Services sources from specific accounts and specific + Regions. - `"listSingleDimension"`: List the view of log sources for enabled Security Lake accounts - for the entire region. -- `"listTwoDimensions"`: Lists the log sources for the specified source types in enabled - Security Lake accounts for the entire Region, for selected member accounts. 
-- `"maxResults"`: The maximum number of accounts for which the configuration is displayed. -- `"nextToken"`: If nextToken is returned, there are more results available. You can make - the call again using the returned token to retrieve the next page. + for all Amazon Web Services sources from specific accounts or specific Regions. +- `"listTwoDimensions"`: Lists the view of log sources for enabled Security Lake accounts + for specific Amazon Web Services sources from specific accounts or specific Regions. +- `"maxResults"`: The maximum number of accounts for which the log sources are displayed. +- `"nextToken"`: If nextToken is returned, there are more results available. You can repeat + the call using the returned token to retrieve the next page. """ function list_log_sources(; aws_config::AbstractAWSConfig=global_aws_config()) return securitylake( @@ -1007,13 +1020,15 @@ end list_subscribers() list_subscribers(params::Dict{String,<:Any}) -List all subscribers for the specific Security Lake account ID. +List all subscribers for the specific Amazon Security Lake account ID. You can retrieve a +list of subscriptions associated with a specific organization or Amazon Web Services +account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"maxResults"`: The maximum number of accounts for which the configuration is displayed. -- `"nextToken"`: If nextToken is returned, there are more results available. You can make - the call again using the returned token to retrieve the next page. +- `"nextToken"`: If nextToken is returned, there are more results available. You can repeat + the call using the returned token to retrieve the next page. 
""" function list_subscribers(; aws_config::AbstractAWSConfig=global_aws_config()) return securitylake( @@ -1036,13 +1051,12 @@ end update_datalake(configurations) update_datalake(configurations, params::Dict{String,<:Any}) -Amazon Security Lake allows you to specify where to store your security data and for how -long. You can specify a rollup Region to consolidate data from multiple regions. You can -update the properties of a Region or source. Input can either be directly specified to the -API. +Specifies where to store your security data and for how long. You can add a rollup Region +to consolidate data from multiple Amazon Web Services Regions. # Arguments -- `configurations`: The configuration object +- `configurations`: Specify the Region or Regions that will contribute data to the rollup + region. """ function update_datalake(configurations; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1076,7 +1090,7 @@ end Update the expiration period for the exception message to your preferred time, and control the time-to-live (TTL) for the exception message to remain. Exceptions are stored by -default, for a 2 week period of time from when a record was created in Security Lake. +default for 2 weeks from when a record was created in Amazon Security Lake. # Arguments - `exception_message_expiry`: The time-to-live (TTL) for the exception message to remain. @@ -1117,10 +1131,11 @@ end update_datalake_exceptions_subscription(notification_endpoint, subscription_protocol) update_datalake_exceptions_subscription(notification_endpoint, subscription_protocol, params::Dict{String,<:Any}) -Update the subscription notification for exception notification. +Updates the specified notification subscription in Amazon Security Lake for the +organization you specify. # Arguments -- `notification_endpoint`: The account which is subscribed to receive exception +- `notification_endpoint`: The account that is subscribed to receive exception notifications. 
- `subscription_protocol`: The subscription protocol to which exception messages are posted. @@ -1167,40 +1182,47 @@ function update_datalake_exceptions_subscription( end """ - update_subscriber(id) - update_subscriber(id, params::Dict{String,<:Any}) + update_subscriber(id, source_types) + update_subscriber(id, source_types, params::Dict{String,<:Any}) -Update the subscription permission for the given Security Lake account ID. +Updates an existing subscription for the given Amazon Security Lake account ID. You can +update a subscriber by changing the sources that the subscriber consumes data from. # Arguments -- `id`: A value created by Security Lake that uniquely identifies your UpdateSubscriber API - request. +- `id`: A value created by Security Lake that uniquely identifies your subscription. +- `source_types`: The supported Amazon Web Services from which logs and events are + collected. For the list of supported Amazon Web Services, see the Amazon Security Lake User + Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"externalId"`: External ID of the Security Lake account. -- `"sourceTypes"`: The supported Amazon Web Services services from which logs and events - are collected. Amazon Security Lake supports logs and events collection for the following - natively-supported Amazon Web Services services. For more information, see the Amazon - Security Lake User Guide. -- `"subscriberDescription"`: Description of the Security Lake account subscriber. -- `"subscriberName"`: Name of the Security Lake account subscriber. +- `"externalId"`: The external ID of the Security Lake account. +- `"subscriberDescription"`: The description of the Security Lake account subscriber. +- `"subscriberName"`: The name of the Security Lake account subscriber. 
""" -function update_subscriber(id; aws_config::AbstractAWSConfig=global_aws_config()) +function update_subscriber( + id, sourceTypes; aws_config::AbstractAWSConfig=global_aws_config() +) return securitylake( "PUT", - "/v1/subscribers/$(id)"; + "/v1/subscribers/$(id)", + Dict{String,Any}("sourceTypes" => sourceTypes); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function update_subscriber( - id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() + id, + sourceTypes, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), ) return securitylake( "PUT", "/v1/subscribers/$(id)", - params; + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("sourceTypes" => sourceTypes), params) + ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -1210,7 +1232,7 @@ end update_subscription_notification_configuration(subscription_id) update_subscription_notification_configuration(subscription_id, params::Dict{String,<:Any}) -Create a new subscription notification or add the existing subscription notification +Creates a new subscription notification or adds the existing subscription notification setting for the specified subscription ID. # Arguments @@ -1220,7 +1242,7 @@ setting for the specified subscription ID. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"createSqs"`: Create a new subscription notification for the specified subscription ID - in Security Lake. + in Amazon Security Lake. - `"httpsApiKeyName"`: The key name for the subscription notification. - `"httpsApiKeyValue"`: The key value for the subscription notification. - `"httpsMethod"`: The HTTPS method used for the subscription notification. 
diff --git a/src/services/ssm.jl b/src/services/ssm.jl index c994b52905..6d024b5028 100644 --- a/src/services/ssm.jl +++ b/src/services/ssm.jl @@ -4904,13 +4904,14 @@ accounts to view and interact with OpsCenter operational work items (OpsItems). # Arguments - `policy`: A policy you want to associate with a resource. -- `resource_arn`: Amazon Resource Name (ARN) of the resource to which the policies are - attached. +- `resource_arn`: Amazon Resource Name (ARN) of the resource to which you want to attach a + policy. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"PolicyHash"`: ID of the current policy version. The hash helps to prevent a situation - where multiple users attempt to overwrite a policy. + where multiple users attempt to overwrite a policy. You must provide this hash when + updating or deleting a policy. - `"PolicyId"`: The policy ID. """ function put_resource_policy( diff --git a/src/services/support.jl b/src/services/support.jl index a6c26e7074..3420c569e7 100644 --- a/src/services/support.jl +++ b/src/services/support.jl @@ -13,9 +13,9 @@ container for attachments that you add to a case or case communication. The set available for 1 hour after it's created. The expiryTime returned in the response is when the set expires. You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support API. If you call the Amazon Web Services -Support API from an account that does not have a Business, Enterprise On-Ramp, or -Enterprise Support plan, the SubscriptionRequiredException error message appears. For -information about changing your support plan, see Amazon Web Services Support. +Support API from an account that doesn't have a Business, Enterprise On-Ramp, or Enterprise +Support plan, the SubscriptionRequiredException error message appears. For information +about changing your support plan, see Amazon Web Services Support. 
# Arguments - `attachments`: One or more attachments to add to the set. You can add up to three @@ -65,8 +65,8 @@ caseId parameter to identify the case to which to add communication. You can lis email addresses to copy on the communication by using the ccEmailAddresses parameter. The communicationBody value contains the text of the communication. You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services -Support API. If you call the Amazon Web Services Support API from an account that does -not have a Business, Enterprise On-Ramp, or Enterprise Support plan, the +Support API. If you call the Amazon Web Services Support API from an account that +doesn't have a Business, Enterprise On-Ramp, or Enterprise Support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see Amazon Web Services Support. @@ -127,7 +127,7 @@ existing case. The caseId is separate from the displayId that appears in the Ama Services Support Center. Use the DescribeCases operation to get the displayId. You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support API. If you call the Amazon Web Services Support API from an account -that does not have a Business, Enterprise On-Ramp, or Enterprise Support plan, the +that doesn't have a Business, Enterprise On-Ramp, or Enterprise Support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see Amazon Web Services Support. @@ -150,9 +150,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the Amazon Web Services SDKs. - `"issueType"`: The type of issue for the case. You can specify customer-service or technical. If you don't specify a value, the default is technical. -- `"language"`: The language in which Amazon Web Services Support handles the case. 
You - must specify the ISO 639-1 code for the language parameter if you want support in that - language. Currently, English (\"en\") and Japanese (\"ja\") are supported. +- `"language"`: The language in which Amazon Web Services Support handles the case. Amazon + Web Services Support currently supports English (\"en\") and Japanese (\"ja\"). You must + specify the ISO 639-1 code for the language parameter if you want support in that language. - `"serviceCode"`: The code for the Amazon Web Services service. You can use the DescribeServices operation to get the possible serviceCode values. - `"severityCode"`: A value that indicates the urgency of the case. This value determines @@ -204,7 +204,7 @@ case management system when you add an attachment to a case or case communicatio Attachment IDs are returned in the AttachmentDetails objects that are returned by the DescribeCommunications operation. You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support API. If you call the -Amazon Web Services Support API from an account that does not have a Business, Enterprise +Amazon Web Services Support API from an account that doesn't have a Business, Enterprise On-Ramp, or Enterprise Support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see Amazon Web Services Support. @@ -252,7 +252,7 @@ records represented by the CaseDetails objects. Case data is available for 12 after creation. If a case was created more than 12 months ago, a request might return an error. You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support API. If you call the Amazon Web Services Support API -from an account that does not have a Business, Enterprise On-Ramp, or Enterprise Support +from an account that doesn't have a Business, Enterprise On-Ramp, or Enterprise Support plan, the SubscriptionRequiredException error message appears. 
For information about changing your support plan, see Amazon Web Services Support. @@ -270,9 +270,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys DescribeCases response. By default, communications are included. - `"includeResolvedCases"`: Specifies whether to include resolved support cases in the DescribeCases response. By default, resolved cases aren't included. -- `"language"`: The ISO 639-1 code for the language in which Amazon Web Services provides - support. Amazon Web Services Support currently supports English (\"en\") and Japanese - (\"ja\"). Language parameters must be passed explicitly for operations that take them. +- `"language"`: The language in which Amazon Web Services Support handles the case. Amazon + Web Services Support currently supports English (\"en\") and Japanese (\"ja\"). You must + specify the ISO 639-1 code for the language parameter if you want support in that language. - `"maxResults"`: The maximum number of results to return before paginating. - `"nextToken"`: A resumption point for pagination. """ @@ -299,7 +299,7 @@ the maxResults and nextToken parameters to control the pagination of the results maxResults to the number of cases that you want to display on each page, and use nextToken to specify the resumption of pagination. You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support API. If you call the -Amazon Web Services Support API from an account that does not have a Business, Enterprise +Amazon Web Services Support API from an account that doesn't have a Business, Enterprise On-Ramp, or Enterprise Support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see Amazon Web Services Support. @@ -351,15 +351,15 @@ operation. Always use the service codes and categories that the DescribeServices returns, so that you have the most recent set of service and category codes. 
You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support API. If you call the Amazon Web Services Support API from an account -that does not have a Business, Enterprise On-Ramp, or Enterprise Support plan, the +that doesn't have a Business, Enterprise On-Ramp, or Enterprise Support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see Amazon Web Services Support. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"language"`: The ISO 639-1 code for the language in which Amazon Web Services provides - support. Amazon Web Services Support currently supports English (\"en\") and Japanese - (\"ja\"). Language parameters must be passed explicitly for operations that take them. +- `"language"`: The language in which Amazon Web Services Support handles the case. Amazon + Web Services Support currently supports English (\"en\") and Japanese (\"ja\"). You must + specify the ISO 639-1 code for the language parameter if you want support in that language. - `"serviceCodeList"`: A JSON-formatted list of service codes available for Amazon Web Services services. """ @@ -384,15 +384,15 @@ Returns the list of severity levels that you can assign to a support case. The s level for a case is also a field in the CaseDetails data type that you include for a CreateCase request. You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support API. If you call the Amazon Web Services -Support API from an account that does not have a Business, Enterprise On-Ramp, or -Enterprise Support plan, the SubscriptionRequiredException error message appears. For -information about changing your support plan, see Amazon Web Services Support. 
+Support API from an account that doesn't have a Business, Enterprise On-Ramp, or Enterprise +Support plan, the SubscriptionRequiredException error message appears. For information +about changing your support plan, see Amazon Web Services Support. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"language"`: The ISO 639-1 code for the language in which Amazon Web Services provides - support. Amazon Web Services Support currently supports English (\"en\") and Japanese - (\"ja\"). Language parameters must be passed explicitly for operations that take them. +- `"language"`: The language in which Amazon Web Services Support handles the case. Amazon + Web Services Support currently supports English (\"en\") and Japanese (\"ja\"). You must + specify the ISO 639-1 code for the language parameter if you want support in that language. """ function describe_severity_levels(; aws_config::AbstractAWSConfig=global_aws_config()) return support( @@ -420,10 +420,14 @@ checks are refreshed automatically, and you can't return their refresh statuses the DescribeTrustedAdvisorCheckRefreshStatuses operation. If you call this operation for these checks, you might see an InvalidParameterValue error. You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support API. - If you call the Amazon Web Services Support API from an account that does not have a + If you call the Amazon Web Services Support API from an account that doesn't have a Business, Enterprise On-Ramp, or Enterprise Support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see Amazon Web -Services Support. +Services Support. To call the Trusted Advisor operations in the Amazon Web Services +Support API, you must use the US East (N. Virginia) endpoint. 
Currently, the US West +(Oregon) and Europe (Ireland) endpoints don't support the Trusted Advisor operations. For +more information, see About the Amazon Web Services Support API in the Amazon Web Services +Support User Guide. # Arguments - `check_ids`: The IDs of the Trusted Advisor checks to get the status. If you specify the @@ -469,19 +473,24 @@ status - The alert status of the check can be ok (green), warning (yellow), erro not_available. timestamp - The time of the last refresh of the check. checkId - The unique identifier for the check. You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support API. If you call the -Amazon Web Services Support API from an account that does not have a Business, Enterprise +Amazon Web Services Support API from an account that doesn't have a Business, Enterprise On-Ramp, or Enterprise Support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see Amazon Web Services Support. - + To call the Trusted Advisor operations in the Amazon Web Services Support API, you must +use the US East (N. Virginia) endpoint. Currently, the US West (Oregon) and Europe +(Ireland) endpoints don't support the Trusted Advisor operations. For more information, see +About the Amazon Web Services Support API in the Amazon Web Services Support User Guide. # Arguments - `check_id`: The unique identifier for the Trusted Advisor check. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"language"`: The ISO 639-1 code for the language in which Amazon Web Services provides - support. Amazon Web Services Support currently supports English (\"en\") and Japanese - (\"ja\"). Language parameters must be passed explicitly for operations that take them. +- `"language"`: The ISO 639-1 code for the language that you want your check results to + appear in. 
The Amazon Web Services Support API currently supports the following languages + for Trusted Advisor: Chinese, Simplified - zh Chinese, Traditional - zh_TW English + - en French - fr German - de Indonesian - id Italian - it Japanese - ja + Korean - ko Portuguese, Brazilian - pt_BR Spanish - es """ function describe_trusted_advisor_check_result( checkId; aws_config::AbstractAWSConfig=global_aws_config() @@ -512,10 +521,14 @@ Returns the results for the Trusted Advisor check summaries for the check IDs th specified. You can get the check IDs by calling the DescribeTrustedAdvisorChecks operation. The response contains an array of TrustedAdvisorCheckSummary objects. You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services -Support API. If you call the Amazon Web Services Support API from an account that does -not have a Business, Enterprise On-Ramp, or Enterprise Support plan, the +Support API. If you call the Amazon Web Services Support API from an account that +doesn't have a Business, Enterprise On-Ramp, or Enterprise Support plan, the SubscriptionRequiredException error message appears. For information about changing your -support plan, see Amazon Web Services Support. +support plan, see Amazon Web Services Support. To call the Trusted Advisor operations in +the Amazon Web Services Support API, you must use the US East (N. Virginia) endpoint. +Currently, the US West (Oregon) and Europe (Ireland) endpoints don't support the Trusted +Advisor operations. For more information, see About the Amazon Web Services Support API in +the Amazon Web Services Support User Guide. # Arguments - `check_ids`: The IDs of the Trusted Advisor checks. @@ -551,21 +564,26 @@ end describe_trusted_advisor_checks(language, params::Dict{String,<:Any}) Returns information about all available Trusted Advisor checks, including the name, ID, -category, description, and metadata. You must specify a language code. 
The Amazon Web -Services Support API currently supports English (\"en\") and Japanese (\"ja\"). The -response contains a TrustedAdvisorCheckDescription object for each check. You must set the -Amazon Web Services Region to us-east-1. You must have a Business, Enterprise On-Ramp, -or Enterprise Support plan to use the Amazon Web Services Support API. If you call the -Amazon Web Services Support API from an account that does not have a Business, Enterprise +category, description, and metadata. You must specify a language code. The response +contains a TrustedAdvisorCheckDescription object for each check. You must set the Amazon +Web Services Region to us-east-1. You must have a Business, Enterprise On-Ramp, or +Enterprise Support plan to use the Amazon Web Services Support API. If you call the +Amazon Web Services Support API from an account that doesn't have a Business, Enterprise On-Ramp, or Enterprise Support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see Amazon Web Services Support. The names and descriptions for Trusted Advisor checks are subject to change. We recommend -that you specify the check ID in your code to uniquely identify a check. +that you specify the check ID in your code to uniquely identify a check. To call the +Trusted Advisor operations in the Amazon Web Services Support API, you must use the US East +(N. Virginia) endpoint. Currently, the US West (Oregon) and Europe (Ireland) endpoints +don't support the Trusted Advisor operations. For more information, see About the Amazon +Web Services Support API in the Amazon Web Services Support User Guide. # Arguments -- `language`: The ISO 639-1 code for the language in which Amazon Web Services provides - support. Amazon Web Services Support currently supports English (\"en\") and Japanese - (\"ja\"). Language parameters must be passed explicitly for operations that take them. 
+- `language`: The ISO 639-1 code for the language that you want your checks to appear in. + The Amazon Web Services Support API currently supports the following languages for Trusted + Advisor: Chinese, Simplified - zh Chinese, Traditional - zh_TW English - en + French - fr German - de Indonesian - id Italian - it Japanese - ja Korean - + ko Portuguese, Brazilian - pt_BR Spanish - es """ function describe_trusted_advisor_checks( @@ -598,15 +616,18 @@ end refresh_trusted_advisor_check(check_id, params::Dict{String,<:Any}) Refreshes the Trusted Advisor check that you specify using the check ID. You can get the -check IDs by calling the DescribeTrustedAdvisorChecks operation. Some checks are refreshed +check IDs by calling the DescribeTrustedAdvisorChecks operation. Some checks are refreshed automatically. If you call the RefreshTrustedAdvisorCheck operation to refresh them, you -might see the InvalidParameterValue error. The response contains a +might see the InvalidParameterValue error. The response contains a TrustedAdvisorCheckRefreshStatus object. You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support API. If you call the -Amazon Web Services Support API from an account that does not have a Business, Enterprise +Amazon Web Services Support API from an account that doesn't have a Business, Enterprise On-Ramp, or Enterprise Support plan, the SubscriptionRequiredException error message appears. For information about changing your support plan, see Amazon Web Services Support. - + To call the Trusted Advisor operations in the Amazon Web Services Support API, you must +use the US East (N. Virginia) endpoint. Currently, the US West (Oregon) and Europe +(Ireland) endpoints don't support the Trusted Advisor operations. For more information, see +About the Amazon Web Services Support API in the Amazon Web Services Support User Guide. 
# Arguments
- `check_id`: The unique identifier for the Trusted Advisor check to refresh. Specifying
@@ -642,9 +663,9 @@ end
Resolves a support case. This operation takes a caseId and returns the initial and final
state of the case.  You must have a Business, Enterprise On-Ramp, or Enterprise Support
plan to use the Amazon Web Services Support API. If you call the Amazon Web Services
-Support API from an account that does not have a Business, Enterprise On-Ramp, or
-Enterprise Support plan, the SubscriptionRequiredException error message appears. For
-information about changing your support plan, see Amazon Web Services Support.
+Support API from an account that doesn't have a Business, Enterprise On-Ramp, or Enterprise
+Support plan, the SubscriptionRequiredException error message appears. For information
+about changing your support plan, see Amazon Web Services Support.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
diff --git a/src/services/transfer.jl b/src/services/transfer.jl
index a1b859cc78..9965dc2d9f 100644
--- a/src/services/transfer.jl
+++ b/src/services/transfer.jl
@@ -418,8 +418,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
  server.
- `"Tags"`: Key-value pairs that can be used to group and search for servers.
- `"WorkflowDetails"`: Specifies the workflow ID for the workflow to assign and the
-  execution role that's used for executing the workflow. In addition to a workflow to execute
-  when a file is uploaded completely, WorkflowDetails can also contain a workflow ID (and
+  execution role that's used for executing the workflow. In addition to a workflow to execute
+  when a file is uploaded completely, WorkflowDetails can also contain a workflow ID (and
  execution role) for a workflow to execute on partial upload. A partial upload occurs when a
  file is open when the session disconnects.
""" @@ -496,12 +496,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys in Amazon EFS determine the level of access your users get when transferring files into and out of your Amazon EFS file systems. - `"SshPublicKeyBody"`: The public portion of the Secure Shell (SSH) key used to - authenticate the user to the server. The three standard SSH public key format elements are - <key type>, <body base64>, and an optional <comment>, with spaces between - each element. Transfer Family accepts RSA, ECDSA, and ED25519 keys. For RSA keys, the key - type is ssh-rsa. For ED25519 keys, the key type is ssh-ed25519. For ECDSA keys, the key - type is either ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, or ecdsa-sha2-nistp521, depending - on the size of the key you generated. + authenticate the user to the server. Transfer Family accepts RSA, ECDSA, and ED25519 keys. - `"Tags"`: Key-value pairs that can be used to group and search for users. Tags are metadata attached to users for any purpose. """ @@ -2742,8 +2737,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"SecurityPolicyName"`: Specifies the name of the security policy that is attached to the server. - `"WorkflowDetails"`: Specifies the workflow ID for the workflow to assign and the - execution role that's used for executing the workflow. In addition to a workflow to execute - when a file is uploaded completely, WorkflowDetails can also contain a workflow ID (and + execution role that's used for executing the workflow. In additon to a workflow to execute + when a file is uploaded completely, WorkflowDeatails can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when a file is open when the session disconnects. To remove an associated workflow from a server, you can provide an empty OnUpload object, as in the following example. aws transfer