diff --git a/src/AWSServices.jl b/src/AWSServices.jl index 259e8f806..2cb0ef193 100644 --- a/src/AWSServices.jl +++ b/src/AWSServices.jl @@ -640,6 +640,7 @@ const privatenetworks = AWS.RestJSONService( "private-networks", "private-networks", "2021-12-03" ) const proton = AWS.JSONService("proton", "proton", "2020-07-20", "1.0", "AwsProton20200720") +const qapps = AWS.RestJSONService("qapps", "data.qapps", "2023-11-27") const qbusiness = AWS.RestJSONService("qbusiness", "qbusiness", "2023-11-27") const qconnect = AWS.RestJSONService("wisdom", "wisdom", "2020-10-19") const qldb = AWS.RestJSONService("qldb", "qldb", "2019-01-02") diff --git a/src/services/acm.jl b/src/services/acm.jl index 1e05ab231..68d4b6e64 100644 --- a/src/services/acm.jl +++ b/src/services/acm.jl @@ -223,10 +223,12 @@ end get_certificate(certificate_arn) get_certificate(certificate_arn, params::Dict{String,<:Any}) -Retrieves an Amazon-issued certificate and its certificate chain. The chain consists of the -certificate of the issuing CA and the intermediate certificates of any other subordinate -CAs. All of the certificates are base64 encoded. You can use OpenSSL to decode the -certificates and inspect individual fields. +Retrieves a certificate and its certificate chain. The certificate may be either a public +or private certificate issued using the ACM RequestCertificate action, or a certificate +imported into ACM using the ImportCertificate action. The chain consists of the certificate +of the issuing CA and the intermediate certificates of any other subordinate CAs. All of +the certificates are base64 encoded. You can use OpenSSL to decode the certificates and +inspect individual fields. # Arguments - `certificate_arn`: String that contains a certificate ARN in the following format: @@ -271,23 +273,21 @@ ACM does not provide managed renewal for certificates that you import. 
Note the guidelines when importing third party certificates: You must enter the private key that matches the certificate you are importing. The private key must be unencrypted. You cannot import a private key that is protected by a password or a passphrase. The private -key must be no larger than 5 KB (5,120 bytes). If the certificate you are importing is -not self-signed, you must enter its certificate chain. If a certificate chain is -included, the issuer must be the subject of one of the certificates in the chain. The -certificate, private key, and certificate chain must be PEM-encoded. The current time -must be between the Not Before and Not After certificate fields. The Issuer field must -not be empty. The OCSP authority URL, if present, must not exceed 1000 characters. To -import a new certificate, omit the CertificateArn argument. Include this argument only when -you want to replace a previously imported certificate. When you import a certificate by -using the CLI, you must specify the certificate, the certificate chain, and the private key -by their file names preceded by fileb://. For example, you can specify a certificate saved -in the C:temp folder as fileb://C:tempcertificate_to_import.pem. If you are making an HTTP -or HTTPS Query request, include these arguments as BLOBs. When you import a certificate -by using an SDK, you must specify the certificate, the certificate chain, and the private -key files in the manner required by the programming language you're using. The -cryptographic algorithm of an imported certificate must match the algorithm of the signing -CA. For example, if the signing CA key type is RSA, then the certificate key type must also -be RSA. This operation returns the Amazon Resource Name (ARN) of the imported certificate. +key must be no larger than 5 KB (5,120 bytes). The certificate, private key, and +certificate chain must be PEM-encoded. The current time must be between the Not Before +and Not After certificate fields. 
The Issuer field must not be empty. The OCSP +authority URL, if present, must not exceed 1000 characters. To import a new certificate, +omit the CertificateArn argument. Include this argument only when you want to replace a +previously imported certificate. When you import a certificate by using the CLI, you must +specify the certificate, the certificate chain, and the private key by their file names +preceded by fileb://. For example, you can specify a certificate saved in the C:temp folder +as fileb://C:tempcertificate_to_import.pem. If you are making an HTTP or HTTPS Query +request, include these arguments as BLOBs. When you import a certificate by using an +SDK, you must specify the certificate, the certificate chain, and the private key files in +the manner required by the programming language you're using. The cryptographic +algorithm of an imported certificate must match the algorithm of the signing CA. For +example, if the signing CA key type is RSA, then the certificate key type must also be RSA. + This operation returns the Amazon Resource Name (ARN) of the imported certificate. # Arguments - `certificate`: The certificate to import. @@ -335,10 +335,12 @@ end list_certificates() list_certificates(params::Dict{String,<:Any}) -Retrieves a list of certificate ARNs and domain names. You can request that only -certificates that match a specific status be listed. You can also filter by specific -attributes of the certificate. Default filtering returns only RSA_2048 certificates. For -more information, see Filters. +Retrieves a list of certificate ARNs and domain names. By default, the API returns RSA_2048 +certificates. To return all certificates in the account, include the keyType filter with +the values [RSA_1024, RSA_2048, RSA_3072, RSA_4096, EC_prime256v1, EC_secp384r1, +EC_secp521r1]. In addition to keyType, you can also filter by the CertificateStatuses, +keyUsage, and extendedKeyUsage attributes on the certificate. For more information, see +Filters. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -592,10 +594,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys certificate uses to encrypt data. RSA is the default key algorithm for ACM certificates. Elliptic Curve Digital Signature Algorithm (ECDSA) keys are smaller, offering security comparable to RSA keys but with greater computing efficiency. However, ECDSA is not - supported by all network clients. Some AWS services may require RSA keys, or only support - ECDSA keys of a particular size, while others allow the use of either RSA and ECDSA keys to - ensure that compatibility is not broken. Check the requirements for the AWS service where - you plan to deploy your certificate. Default: RSA_2048 + supported by all network clients. Some Amazon Web Services services may require RSA keys, + or only support ECDSA keys of a particular size, while others allow the use of either RSA + and ECDSA keys to ensure that compatibility is not broken. Check the requirements for the + Amazon Web Services service where you plan to deploy your certificate. For more information + about selecting an algorithm, see Key algorithms. Algorithms supported for an ACM + certificate request include: RSA_2048 EC_prime256v1 EC_secp384r1 Other + listed algorithms are for imported certificates only. When you request a private PKI + certificate signed by a CA from Amazon Web Services Private CA, the specified signing + algorithm family (RSA or ECDSA) must match the algorithm family of the CA's secret key. + Default: RSA_2048 - `"Options"`: Currently, you can use this parameter to specify whether to add the certificate to a certificate transparency log. Certificate transparency makes it possible to detect SSL/TLS certificates that have been mistakenly or maliciously issued. 
diff --git a/src/services/application_auto_scaling.jl b/src/services/application_auto_scaling.jl index e50633819..c57f3b09e 100644 --- a/src/services/application_auto_scaling.jl +++ b/src/services/application_auto_scaling.jl @@ -19,7 +19,7 @@ scaling policy in the Application Auto Scaling User Guide. - `resource_id`: The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -51,26 +51,28 @@ scaling policy in the Application Auto Scaling User Guide. 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. 
- `scalable_dimension`: The scalable dimension. This string consists of the service - namespace, resource type, and scaling property. ecs:service:DesiredCount - The desired - task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The - instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The - target capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The desired capacity - of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write - capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The - provisioned write capacity for a DynamoDB global secondary index. - rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. - Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. - sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model - endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a - custom resource provided by your own application or service. + namespace, resource type, and scaling property. ecs:service:DesiredCount - The task + count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance + count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target + capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream + 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a + DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB global secondary index. 
dynamodb:index:WriteCapacityUnits - The provisioned + write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The + count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible + edition and Aurora PostgreSQL-compatible edition. sagemaker:variant:DesiredInstanceCount + - The number of EC2 instances for a SageMaker model endpoint variant. + custom-resource:ResourceType:Property - The scalable dimension for a custom resource + provided by your own application or service. comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint. comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units @@ -85,8 +87,10 @@ scaling policy in the Application Auto Scaling User Guide. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource instead. @@ -149,7 +153,7 @@ more information, see Delete a scheduled action in the Application Auto Scaling - `resource_id`: The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. 
Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -181,26 +185,28 @@ more information, see Delete a scheduled action in the Application Auto Scaling 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `scalable_dimension`: The scalable dimension. This string consists of the service - namespace, resource type, and scaling property. ecs:service:DesiredCount - The desired - task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The - instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The - target capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The desired capacity - of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB table. 
dynamodb:table:WriteCapacityUnits - The provisioned write - capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The - provisioned write capacity for a DynamoDB global secondary index. - rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. - Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. - sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model - endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a - custom resource provided by your own application or service. + namespace, resource type, and scaling property. ecs:service:DesiredCount - The task + count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance + count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target + capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream + 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a + DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned + write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The + count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible + edition and Aurora PostgreSQL-compatible edition. sagemaker:variant:DesiredInstanceCount + - The number of EC2 instances for a SageMaker model endpoint variant. + custom-resource:ResourceType:Property - The scalable dimension for a custom resource + provided by your own application or service. 
comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint. comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units @@ -215,8 +221,10 @@ more information, see Delete a scheduled action in the Application Auto Scaling Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `scheduled_action_name`: The name of the scheduled action. - `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource @@ -282,7 +290,7 @@ with it. - `resource_id`: The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -314,17 +322,19 @@ with it. 2e31-5. 
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `scalable_dimension`: The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. - ecs:service:DesiredCount - The desired task count of an ECS service. + ecs:service:DesiredCount - The task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet. - appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. + appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global @@ -349,8 +359,10 @@ with it. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. 
sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource instead. @@ -421,7 +433,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ResourceIds"`: The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -453,18 +465,20 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. 
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `"ScalableDimension"`: The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID. - ecs:service:DesiredCount - The desired task count of an ECS service. + ecs:service:DesiredCount - The task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet. - appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. + appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global @@ -489,8 +503,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. 
sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. """ function describe_scalable_targets( ServiceNamespace; aws_config::AbstractAWSConfig=global_aws_config() @@ -549,7 +565,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ResourceId"`: The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -581,20 +597,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. 
+ inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `"ScalableDimension"`: The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you - must also specify a resource ID. ecs:service:DesiredCount - The desired task count of an - ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR + must also specify a resource ID. ecs:service:DesiredCount - The task count of an ECS + service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - Fleet. appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. - dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. - dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. - dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global + Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet. + dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. + dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. + dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora @@ -616,8 +634,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon ElastiCache replication group. 
neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. """ function describe_scaling_activities( ServiceNamespace; aws_config::AbstractAWSConfig=global_aws_config() @@ -672,7 +692,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ResourceId"`: The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -704,20 +724,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. 
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `"ScalableDimension"`: The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you - must also specify a resource ID. ecs:service:DesiredCount - The desired task count of an - ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR + must also specify a resource ID. ecs:service:DesiredCount - The task count of an ECS + service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - Fleet. appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. - dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. - dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. - dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global + Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet. + dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. + dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. + dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index. 
rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora @@ -739,8 +761,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. """ function describe_scaling_policies( ServiceNamespace; aws_config::AbstractAWSConfig=global_aws_config() @@ -775,8 +799,8 @@ end Describes the Application Auto Scaling scheduled actions for the specified service namespace. You can filter the results using the ResourceId, ScalableDimension, and -ScheduledActionNames parameters. For more information, see Scheduled scaling and Managing -scheduled scaling in the Application Auto Scaling User Guide. +ScheduledActionNames parameters. For more information, see Scheduled scaling in the +Application Auto Scaling User Guide. # Arguments - `service_namespace`: The namespace of the Amazon Web Services service that provides the @@ -794,7 +818,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ResourceId"`: The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. 
Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -826,20 +850,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `"ScalableDimension"`: The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you - must also specify a resource ID. ecs:service:DesiredCount - The desired task count of an - ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR + must also specify a resource ID. ecs:service:DesiredCount - The task count of an ECS + service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - Fleet. 
appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. - dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. - dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. - dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global + Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet. + dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. + dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. + dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora @@ -861,8 +887,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `"ScheduledActionNames"`: The names of the scheduled actions to describe. """ function describe_scheduled_actions( @@ -897,8 +925,8 @@ end list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) Returns all the tags on the specified Application Auto Scaling scalable target. 
For general -information about tags, including the format and syntax, see Tagging Amazon Web Services -resources in the Amazon Web Services General Reference. +information about tags, including the format and syntax, see Tagging your Amazon Web +Services resources in the Amazon Web Services General Reference. # Arguments - `resource_arn`: Specify the ARN of the scalable target. For example: @@ -964,7 +992,7 @@ scaling policies that were specified for the scalable target are deleted. - `resource_id`: The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -996,26 +1024,28 @@ scaling policies that were specified for the scalable target are deleted. 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. 
+ inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `scalable_dimension`: The scalable dimension. This string consists of the service - namespace, resource type, and scaling property. ecs:service:DesiredCount - The desired - task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The - instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The - target capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The desired capacity - of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write - capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The - provisioned write capacity for a DynamoDB global secondary index. - rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. - Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. - sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model - endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a - custom resource provided by your own application or service. + namespace, resource type, and scaling property. ecs:service:DesiredCount - The task + count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance + count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target + capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream + 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB table. 
dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a + DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned + write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The + count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible + edition and Aurora PostgreSQL-compatible edition. sagemaker:variant:DesiredInstanceCount + - The number of EC2 instances for a SageMaker model endpoint variant. + custom-resource:ResourceType:Property - The scalable dimension for a custom resource + provided by your own application or service. comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint. comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units @@ -1030,8 +1060,10 @@ scaling policies that were specified for the scalable target are deleted. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource instead. @@ -1116,7 +1148,7 @@ scheduled actions that were specified for the scalable target are deleted. 
- `resource_id`: The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -1148,26 +1180,28 @@ scheduled actions that were specified for the scalable target are deleted. 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `scalable_dimension`: The scalable dimension. This string consists of the service - namespace, resource type, and scaling property. ecs:service:DesiredCount - The desired - task count of an ECS service. 
elasticmapreduce:instancegroup:InstanceCount - The - instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The - target capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The desired capacity - of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write - capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The - provisioned write capacity for a DynamoDB global secondary index. - rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. - Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. - sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model - endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a - custom resource provided by your own application or service. + namespace, resource type, and scaling property. ecs:service:DesiredCount - The task + count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance + count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target + capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream + 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a + DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned + write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The + count of Aurora Replicas in an Aurora DB cluster. 
Available for Aurora MySQL-compatible + edition and Aurora PostgreSQL-compatible edition. sagemaker:variant:DesiredInstanceCount + - The number of EC2 instances for a SageMaker model endpoint variant. + custom-resource:ResourceType:Property - The scalable dimension for a custom resource + provided by your own application or service. comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint. comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units @@ -1182,8 +1216,10 @@ scheduled actions that were specified for the scalable target are deleted. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `scheduled_action_name`: The name of the scheduled action. This name must be unique among all other scheduled actions on the specified scalable target. - `service_namespace`: The namespace of the Amazon Web Services service that provides the @@ -1205,8 +1241,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys At and cron expressions use Universal Coordinated Time (UTC) by default. The cron format consists of six fields separated by white spaces: [Minutes] [Hours] [Day_of_Month] [Month] [Day_of_Week] [Year]. For rate expressions, value is a positive integer and unit is minute - | minutes | hour | hours | day | days. 
For more information and examples, see Example - scheduled actions for Application Auto Scaling in the Application Auto Scaling User Guide. + | minutes | hour | hours | day | days. For more information, see Schedule recurring scaling + actions using cron expressions in the Application Auto Scaling User Guide. - `"StartTime"`: The date and time for this scheduled action to start, in UTC. - `"Timezone"`: Specifies the time zone used when setting a scheduled action by using an at or cron expression. If a time zone is not provided, UTC is used by default. Valid values @@ -1294,7 +1330,7 @@ even if you don't include the MinCapacity or MaxCapacity request parameters. - `resource_id`: The identifier of the resource that is associated with the scalable target. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service - name. Example: service/default/sample-webapp. Spot Fleet - The resource type is + name. Example: service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -1326,17 +1362,19 @@ even if you don't include the MinCapacity or MaxCapacity request parameters. 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. 
Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `scalable_dimension`: The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. - ecs:service:DesiredCount - The desired task count of an ECS service. + ecs:service:DesiredCount - The task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet. - appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. + appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global @@ -1361,8 +1399,10 @@ even if you don't include the MinCapacity or MaxCapacity request parameters. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. 
- `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource instead. @@ -1383,20 +1423,20 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys capacity limit in response to changing demand. This property is required when registering a new scalable target. For the following resources, the minimum value allowed is 0. AppStream 2.0 fleets Aurora DB clusters ECS services EMR clusters Lambda - provisioned concurrency SageMaker endpoint variants SageMaker Serverless endpoint - provisioned concurrency Spot Fleets custom resources It's strongly recommended that - you specify a value greater than 0. A value greater than 0 means that data points are - continuously reported to CloudWatch that scaling policies can use to scale on a metric like - average CPU utilization. For all other resources, the minimum allowed value depends on the - type of resource that you are using. If you provide a value that is lower than what a - resource can accept, an error occurs. In which case, the error message will provide the - minimum value that the resource can accept. + provisioned concurrency SageMaker endpoint variants SageMaker inference components + SageMaker serverless endpoint provisioned concurrency Spot Fleets custom resources + It's strongly recommended that you specify a value greater than 0. A value greater than 0 + means that data points are continuously reported to CloudWatch that scaling policies can + use to scale on a metric like average CPU utilization. For all other resources, the minimum + allowed value depends on the type of resource that you are using. If you provide a value + that is lower than what a resource can accept, an error occurs. In which case, the error + message will provide the minimum value that the resource can accept. 
- `"RoleARN"`: This parameter is required for services that do not support service-linked roles (such as Amazon EMR), and it must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf. If the service supports service-linked roles, Application Auto Scaling uses a service-linked role, which - it creates if it does not yet exist. For more information, see Application Auto Scaling IAM - roles. + it creates if it does not yet exist. For more information, see How Application Auto Scaling + works with IAM. - `"SuspendedState"`: An embedded object that contains attributes and attribute values that are used to suspend and resume automatic scaling. Setting the value of an attribute to true suspends the specified scaling activities. Setting it to false (default) resumes the @@ -1405,8 +1445,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are suspended. For DynamicScalingOutSuspended, while a suspension is in effect, all scale-out activities that are triggered by a scaling policy are suspended. For ScheduledScalingSuspended, while a suspension is in effect, all scaling activities that - involve scheduled actions are suspended. For more information, see Suspending and - resuming scaling in the Application Auto Scaling User Guide. + involve scheduled actions are suspended. For more information, see Suspend and resume + scaling in the Application Auto Scaling User Guide. - `"Tags"`: Assigns one or more tags to the scalable target. Use this parameter to tag the scalable target when it is created. To tag an existing scalable target, use the TagResource operation. Each tag consists of a tag key and a tag value. Both the tag key and the tag @@ -1466,10 +1506,10 @@ tag key and a tag value. To edit a tag, specify an existing tag key and a new ta You can use this operation to tag an Application Auto Scaling scalable target, but you cannot tag a scaling policy or scheduled action. 
You can also add tags to an Application Auto Scaling scalable target while creating it (RegisterScalableTarget). For general -information about tags, including the format and syntax, see Tagging Amazon Web Services -resources in the Amazon Web Services General Reference. Use tags to control access to a -scalable target. For more information, see Tagging support for Application Auto Scaling in -the Application Auto Scaling User Guide. +information about tags, including the format and syntax, see Tagging your Amazon Web +Services resources in the Amazon Web Services General Reference. Use tags to control access +to a scalable target. For more information, see Tagging support for Application Auto +Scaling in the Application Auto Scaling User Guide. # Arguments - `resource_arn`: Identifies the Application Auto Scaling scalable target that you want to @@ -1482,7 +1522,7 @@ the Application Auto Scaling User Guide. specify an existing tag key with a different tag value, Application Auto Scaling replaces the current tag value with the specified one. For information about the rules that apply to tag keys and tag values, see User-defined tag restrictions in the Amazon Web Services - Billing and Cost Management User Guide. + Billing User Guide. """ function tag_resource(ResourceARN, Tags; aws_config::AbstractAWSConfig=global_aws_config()) diff --git a/src/services/chime_sdk_media_pipelines.jl b/src/services/chime_sdk_media_pipelines.jl index 3c9ec4e10..39777491d 100644 --- a/src/services/chime_sdk_media_pipelines.jl +++ b/src/services/chime_sdk_media_pipelines.jl @@ -320,16 +320,26 @@ end create_media_pipeline_kinesis_video_stream_pool(pool_name, stream_configuration) create_media_pipeline_kinesis_video_stream_pool(pool_name, stream_configuration, params::Dict{String,<:Any}) -Creates an Kinesis video stream pool for the media pipeline. +Creates an Amazon Kinesis Video Stream pool for use with media stream pipelines. 
If a +meeting uses an opt-in Region as its MediaRegion, the KVS stream must be in that same +Region. For example, if a meeting uses the af-south-1 Region, the KVS stream must also be +in af-south-1. However, if the meeting uses a Region that AWS turns on by default, the KVS +stream can be in any available Region, including an opt-in Region. For example, if the +meeting uses ca-central-1, the KVS stream can be in eu-west-2, us-east-1, af-south-1, or +any other Region that the Amazon Chime SDK supports. To learn which AWS Region a meeting +uses, call the GetMeeting API and use the MediaRegion parameter from the response. For more +information about opt-in Regions, refer to Available Regions in the Amazon Chime SDK +Developer Guide, and Specify which AWS Regions your account can use, in the AWS Account +Management Reference Guide. # Arguments -- `pool_name`: The name of the video stream pool. -- `stream_configuration`: The configuration settings for the video stream. +- `pool_name`: The name of the pool. +- `stream_configuration`: The configuration settings for the stream. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientRequestToken"`: The token assigned to the client making the request. -- `"Tags"`: The tags assigned to the video stream pool. +- `"Tags"`: The tags assigned to the stream pool. """ function create_media_pipeline_kinesis_video_stream_pool( PoolName, StreamConfiguration; aws_config::AbstractAWSConfig=global_aws_config() @@ -531,10 +541,11 @@ end delete_media_pipeline_kinesis_video_stream_pool(identifier) delete_media_pipeline_kinesis_video_stream_pool(identifier, params::Dict{String,<:Any}) -Deletes an Kinesis video stream pool. +Deletes an Amazon Kinesis Video Stream pool. # Arguments -- `identifier`: The ID of the pool being deleted. +- `identifier`: The unique identifier of the requested resource. 
Valid values include the + name and ARN of the media pipeline Kinesis Video Stream pool. """ function delete_media_pipeline_kinesis_video_stream_pool( @@ -671,7 +682,8 @@ end Gets an Kinesis video stream pool. # Arguments -- `identifier`: The ID of the video stream pool. +- `identifier`: The unique identifier of the requested resource. Valid values include the + name and ARN of the media pipeline Kinesis Video Stream pool. """ function get_media_pipeline_kinesis_video_stream_pool( @@ -1314,10 +1326,11 @@ end update_media_pipeline_kinesis_video_stream_pool(identifier) update_media_pipeline_kinesis_video_stream_pool(identifier, params::Dict{String,<:Any}) -Updates an Kinesis video stream pool in a media pipeline. +Updates an Amazon Kinesis Video Stream pool in a media pipeline. # Arguments -- `identifier`: The ID of the video stream pool. +- `identifier`: The unique identifier of the requested resource. Valid values include the + name and ARN of the media pipeline Kinesis Video Stream pool. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: diff --git a/src/services/cloudfront.jl b/src/services/cloudfront.jl index 21bf5b85f..1ce2a1b87 100644 --- a/src/services/cloudfront.jl +++ b/src/services/cloudfront.jl @@ -3692,7 +3692,8 @@ end list_tags_for_resource2020_05_31(resource) list_tags_for_resource2020_05_31(resource, params::Dict{String,<:Any}) -List tags for a CloudFront resource. +List tags for a CloudFront resource. For more information, see Tagging a distribution in +the Amazon CloudFront Developer Guide. # Arguments - `resource`: An ARN of a CloudFront resource. @@ -3778,7 +3779,8 @@ end tag_resource2020_05_31(resource, tags) tag_resource2020_05_31(resource, tags, params::Dict{String,<:Any}) -Add tags to a CloudFront resource. +Add tags to a CloudFront resource. For more information, see Tagging a distribution in the +Amazon CloudFront Developer Guide. 
# Arguments - `resource`: An ARN of a CloudFront resource. @@ -3884,7 +3886,8 @@ end untag_resource2020_05_31(resource, tag_keys) untag_resource2020_05_31(resource, tag_keys, params::Dict{String,<:Any}) -Remove tags from a CloudFront resource. +Remove tags from a CloudFront resource. For more information, see Tagging a distribution in +the Amazon CloudFront Developer Guide. # Arguments - `resource`: An ARN of a CloudFront resource. diff --git a/src/services/cloudhsm_v2.jl b/src/services/cloudhsm_v2.jl index 55326e943..6e20bec62 100644 --- a/src/services/cloudhsm_v2.jl +++ b/src/services/cloudhsm_v2.jl @@ -8,7 +8,8 @@ using AWS.UUIDs copy_backup_to_region(backup_id, destination_region) copy_backup_to_region(backup_id, destination_region, params::Dict{String,<:Any}) -Copy an AWS CloudHSM cluster backup to a different region. +Copy an CloudHSM cluster backup to a different region. Cross-account use: No. You cannot +perform this operation on an CloudHSM backup in a different Amazon Web Services account. # Arguments - `backup_id`: The ID of the backup that will be copied to the destination region. @@ -57,7 +58,9 @@ end create_cluster(hsm_type, subnet_ids) create_cluster(hsm_type, subnet_ids, params::Dict{String,<:Any}) -Creates a new AWS CloudHSM cluster. +Creates a new CloudHSM cluster. Cross-account use: Yes. To perform this operation with an +CloudHSM backup in a different AWS account, specify the full backup ARN in the value of the +SourceBackupId parameter. # Arguments - `hsm_type`: The type of HSM to use in the cluster. The allowed values are hsm1.medium and @@ -71,9 +74,10 @@ Creates a new AWS CloudHSM cluster. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"BackupRetentionPolicy"`: A policy that defines how the service retains backups. - `"Mode"`: The mode to use in the cluster. The allowed values are FIPS and NON_FIPS. -- `"SourceBackupId"`: The identifier (ID) of the cluster backup to restore. 
Use this value - to restore the cluster from a backup instead of creating a new cluster. To find the backup - ID, use DescribeBackups. +- `"SourceBackupId"`: The identifier (ID) or the Amazon Resource Name (ARN) of the cluster + backup to restore. Use this value to restore the cluster from a backup instead of creating + a new cluster. To find the backup ID or ARN, use DescribeBackups. If using a backup in + another account, the full ARN must be supplied. - `"TagList"`: Tags to apply to the CloudHSM cluster during creation. """ function create_cluster( @@ -110,7 +114,9 @@ end create_hsm(availability_zone, cluster_id) create_hsm(availability_zone, cluster_id, params::Dict{String,<:Any}) -Creates a new hardware security module (HSM) in the specified AWS CloudHSM cluster. +Creates a new hardware security module (HSM) in the specified CloudHSM cluster. +Cross-account use: No. You cannot perform this operation on an CloudHSM cluster in a +different Amazon Web Service account. # Arguments - `availability_zone`: The Availability Zone where you are creating the HSM. To find the @@ -160,8 +166,10 @@ end delete_backup(backup_id) delete_backup(backup_id, params::Dict{String,<:Any}) -Deletes a specified AWS CloudHSM backup. A backup can be restored up to 7 days after the -DeleteBackup request is made. For more information on restoring a backup, see RestoreBackup. +Deletes a specified CloudHSM backup. A backup can be restored up to 7 days after the +DeleteBackup request is made. For more information on restoring a backup, see +RestoreBackup. Cross-account use: No. You cannot perform this operation on an CloudHSM +backup in a different Amazon Web Services account. # Arguments - `backup_id`: The ID of the backup to be deleted. To find the ID of a backup, use the @@ -195,9 +203,10 @@ end delete_cluster(cluster_id) delete_cluster(cluster_id, params::Dict{String,<:Any}) -Deletes the specified AWS CloudHSM cluster. 
Before you can delete a cluster, you must -delete all HSMs in the cluster. To see if the cluster contains any HSMs, use -DescribeClusters. To delete an HSM, use DeleteHsm. +Deletes the specified CloudHSM cluster. Before you can delete a cluster, you must delete +all HSMs in the cluster. To see if the cluster contains any HSMs, use DescribeClusters. To +delete an HSM, use DeleteHsm. Cross-account use: No. You cannot perform this operation on +an CloudHSM cluster in a different Amazon Web Services account. # Arguments - `cluster_id`: The identifier (ID) of the cluster that you are deleting. To find the @@ -234,6 +243,8 @@ end Deletes the specified HSM. To specify an HSM, you can use its identifier (ID), the IP address of the HSM's elastic network interface (ENI), or the ID of the HSM's ENI. You need to specify only one of these values. To find these values, use DescribeClusters. +Cross-account use: No. You cannot perform this operation on an CloudHSM hsm in a different +Amazon Web Services account. # Arguments - `cluster_id`: The identifier (ID) of the cluster that contains the HSM that you are @@ -270,15 +281,49 @@ function delete_hsm( ) end +""" + delete_resource_policy() + delete_resource_policy(params::Dict{String,<:Any}) + + Deletes an CloudHSM resource policy. Deleting a resource policy will result in the +resource being unshared and removed from any RAM resource shares. Deleting the resource +policy attached to a backup will not impact any clusters created from that backup. +Cross-account use: No. You cannot perform this operation on an CloudHSM resource in a +different Amazon Web Services account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ResourceArn"`: Amazon Resource Name (ARN) of the resource from which the policy will be + removed. 
+""" +function delete_resource_policy(; aws_config::AbstractAWSConfig=global_aws_config()) + return cloudhsm_v2( + "DeleteResourcePolicy"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function delete_resource_policy( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudhsm_v2( + "DeleteResourcePolicy", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_backups() describe_backups(params::Dict{String,<:Any}) -Gets information about backups of AWS CloudHSM clusters. This is a paginated operation, -which means that each response might contain only a subset of all the backups. When the -response contains only a subset of backups, it includes a NextToken value. Use this value -in a subsequent DescribeBackups request to get more backups. When you receive a response -with no NextToken (or an empty or null value), that means there are no more backups to get. +Gets information about backups of CloudHSM clusters. Lists either the backups you own or +the backups shared with you when the Shared parameter is true. This is a paginated +operation, which means that each response might contain only a subset of all the backups. +When the response contains only a subset of backups, it includes a NextToken value. Use +this value in a subsequent DescribeBackups request to get more backups. When you receive a +response with no NextToken (or an empty or null value), that means there are no more +backups to get. Cross-account use: Yes. Customers can describe backups in other Amazon Web +Services accounts that are shared with them. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -296,6 +341,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys more backups than the number you specify, the response contains a NextToken value. 
- `"NextToken"`: The NextToken value that you received in the previous response. Use this value to get more backups. +- `"Shared"`: Describe backups that are shared with you. By default when using this + option, the command returns backups that have been shared using a standard Resource Access + Manager resource share. In order for a backup that was shared using the PutResourcePolicy + command to be returned, the share must be promoted to a standard resource share using the + RAM PromoteResourceShareCreatedFromPolicy API operation. For more information about sharing + backups, see Working with shared backups in the CloudHSM User Guide. - `"SortAscending"`: Designates whether or not to sort the return backups by ascending chronological order of generation. """ @@ -316,11 +367,13 @@ end describe_clusters() describe_clusters(params::Dict{String,<:Any}) -Gets information about AWS CloudHSM clusters. This is a paginated operation, which means -that each response might contain only a subset of all the clusters. When the response -contains only a subset of clusters, it includes a NextToken value. Use this value in a -subsequent DescribeClusters request to get more clusters. When you receive a response with -no NextToken (or an empty or null value), that means there are no more clusters to get. +Gets information about CloudHSM clusters. This is a paginated operation, which means that +each response might contain only a subset of all the clusters. When the response contains +only a subset of clusters, it includes a NextToken value. Use this value in a subsequent +DescribeClusters request to get more clusters. When you receive a response with no +NextToken (or an empty or null value), that means there are no more clusters to get. +Cross-account use: No. You cannot perform this operation on CloudHSM clusters in a +different Amazon Web Services account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -347,14 +400,40 @@ function describe_clusters( ) end +""" + get_resource_policy() + get_resource_policy(params::Dict{String,<:Any}) + + Retrieves the resource policy document attached to a given resource. Cross-account use: +No. You cannot perform this operation on an CloudHSM resource in a different Amazon Web +Services account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ResourceArn"`: Amazon Resource Name (ARN) of the resource to which a policy is attached. +""" +function get_resource_policy(; aws_config::AbstractAWSConfig=global_aws_config()) + return cloudhsm_v2( + "GetResourcePolicy"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_resource_policy( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudhsm_v2( + "GetResourcePolicy", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ initialize_cluster(cluster_id, signed_cert, trust_anchor) initialize_cluster(cluster_id, signed_cert, trust_anchor, params::Dict{String,<:Any}) -Claims an AWS CloudHSM cluster by submitting the cluster certificate issued by your issuing +Claims an CloudHSM cluster by submitting the cluster certificate issued by your issuing certificate authority (CA) and the CA's root certificate. Before you can claim a cluster, you must sign the cluster's certificate signing request (CSR) with your issuing CA. To get -the cluster's CSR, use DescribeClusters. +the cluster's CSR, use DescribeClusters. Cross-account use: No. You cannot perform this +operation on an CloudHSM cluster in a different Amazon Web Services account. # Arguments - `cluster_id`: The identifier (ID) of the cluster that you are claiming. To find the @@ -412,11 +491,13 @@ end list_tags(resource_id) list_tags(resource_id, params::Dict{String,<:Any}) -Gets a list of tags for the specified AWS CloudHSM cluster. 
This is a paginated operation, +Gets a list of tags for the specified CloudHSM cluster. This is a paginated operation, which means that each response might contain only a subset of all the tags. When the response contains only a subset of tags, it includes a NextToken value. Use this value in a subsequent ListTags request to get more tags. When you receive a response with no NextToken -(or an empty or null value), that means there are no more tags to get. +(or an empty or null value), that means there are no more tags to get. Cross-account use: +No. You cannot perform this operation on an CloudHSM resource in a different Amazon Web +Services account. # Arguments - `resource_id`: The cluster identifier (ID) for the cluster whose tags you are getting. To @@ -456,7 +537,8 @@ end modify_backup_attributes(backup_id, never_expires) modify_backup_attributes(backup_id, never_expires, params::Dict{String,<:Any}) -Modifies attributes for AWS CloudHSM backup. +Modifies attributes for CloudHSM backup. Cross-account use: No. You cannot perform this +operation on an CloudHSM backup in a different Amazon Web Services account. # Arguments - `backup_id`: The identifier (ID) of the backup to modify. To find the ID of a backup, use @@ -500,7 +582,8 @@ end modify_cluster(backup_retention_policy, cluster_id) modify_cluster(backup_retention_policy, cluster_id, params::Dict{String,<:Any}) -Modifies AWS CloudHSM cluster. +Modifies CloudHSM cluster. Cross-account use: No. You cannot perform this operation on an +CloudHSM cluster in a different Amazon Web Services account. # Arguments - `backup_retention_policy`: A policy that defines how the service retains backups. @@ -543,12 +626,52 @@ function modify_cluster( ) end +""" + put_resource_policy() + put_resource_policy(params::Dict{String,<:Any}) + +Creates or updates an CloudHSM resource policy. A resource policy helps you to define the +IAM entity (for example, an Amazon Web Services account) that can manage your CloudHSM +resources. 
The following resources support CloudHSM resource policies: Backup - The +resource policy allows you to describe the backup and restore a cluster from the backup in +another Amazon Web Services account. In order to share a backup, it must be in a 'READY' +state and you must own it. While you can share a backup using the CloudHSM +PutResourcePolicy operation, we recommend using Resource Access Manager (RAM) instead. +Using RAM provides multiple benefits as it creates the policy for you, allows multiple +resources to be shared at one time, and increases the discoverability of shared resources. +If you use PutResourcePolicy and want consumers to be able to describe the backups you +share with them, you must promote the backup to a standard RAM Resource Share using the RAM +PromoteResourceShareCreatedFromPolicy API operation. For more information, see Working +with shared backups in the CloudHSM User Guide Cross-account use: No. You cannot perform +this operation on an CloudHSM resource in a different Amazon Web Services account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Policy"`: The policy you want to associate with a resource. For an example policy, see + Working with shared backups in the CloudHSM User Guide +- `"ResourceArn"`: Amazon Resource Name (ARN) of the resource to which you want to attach a + policy. 
+""" +function put_resource_policy(; aws_config::AbstractAWSConfig=global_aws_config()) + return cloudhsm_v2( + "PutResourcePolicy"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function put_resource_policy( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudhsm_v2( + "PutResourcePolicy", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ restore_backup(backup_id) restore_backup(backup_id, params::Dict{String,<:Any}) -Restores a specified AWS CloudHSM backup that is in the PENDING_DELETION state. For mor -information on deleting a backup, see DeleteBackup. +Restores a specified CloudHSM backup that is in the PENDING_DELETION state. For more +information on deleting a backup, see DeleteBackup. Cross-account use: No. You cannot +perform this operation on an CloudHSM backup in a different Amazon Web Services account. # Arguments - `backup_id`: The ID of the backup to be restored. To find the ID of a backup, use the @@ -582,7 +705,9 @@ end tag_resource(resource_id, tag_list) tag_resource(resource_id, tag_list, params::Dict{String,<:Any}) -Adds or overwrites one or more tags for the specified AWS CloudHSM cluster. +Adds or overwrites one or more tags for the specified CloudHSM cluster. Cross-account use: +No. You cannot perform this operation on an CloudHSM resource in a different Amazon Web +Services account. # Arguments - `resource_id`: The cluster identifier (ID) for the cluster that you are tagging. To find @@ -624,7 +749,9 @@ end untag_resource(resource_id, tag_key_list) untag_resource(resource_id, tag_key_list, params::Dict{String,<:Any}) -Removes the specified tag or tags from the specified AWS CloudHSM cluster. +Removes the specified tag or tags from the specified CloudHSM cluster. Cross-account use: +No. You cannot perform this operation on an CloudHSM resource in a different Amazon Web +Services account. 
# Arguments - `resource_id`: The cluster identifier (ID) for the cluster whose tags you are removing. diff --git a/src/services/connect.jl b/src/services/connect.jl index 23a305e19..e7e326df9 100644 --- a/src/services/connect.jl +++ b/src/services/connect.jl @@ -1048,7 +1048,7 @@ provided in the StartAttachedFileUpload API. # Arguments - `file_id`: The unique identifier of the attached file resource. -- `instance_id`: The unique identifier of the Connect instance. +- `instance_id`: The unique identifier of the Amazon Connect instance. - `associated_resource_arn`: The resource to which the attached file is (being) uploaded to. Cases are the only current supported resource. This value must be a valid ARN. @@ -3574,6 +3574,45 @@ function describe_agent_status( ) end +""" + describe_authentication_profile(authentication_profile_id, instance_id) + describe_authentication_profile(authentication_profile_id, instance_id, params::Dict{String,<:Any}) + +This API is in preview release for Amazon Connect and is subject to change. To request +access to this API, contact Amazon Web Services Support. Describes the target +authentication profile. + +# Arguments +- `authentication_profile_id`: A unique identifier for the authentication profile. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. 
+ +""" +function describe_authentication_profile( + AuthenticationProfileId, InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "GET", + "/authentication-profiles/$(InstanceId)/$(AuthenticationProfileId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_authentication_profile( + AuthenticationProfileId, + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "GET", + "/authentication-profiles/$(InstanceId)/$(AuthenticationProfileId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_contact(contact_id, instance_id) describe_contact(contact_id, instance_id, params::Dict{String,<:Any}) @@ -5657,19 +5696,7 @@ definitions in the Amazon Connect Administrator Guide. Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer talk time CASES_CREATED Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: - Cases created CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| - Incoming | Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings - and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, - contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: - Contact abandoned CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: - Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect - Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in - seconds. For Comparison, you must enter LT (for \"Less than\"). 
UI name: Contacts - abandoned in X seconds CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and filters: - Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect - Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in - seconds. For Comparison, you must enter LT (for \"Less than\"). UI name: Contacts answered - in X seconds CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD + Cases created CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts created Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: Count Valid metric @@ -5698,18 +5725,22 @@ definitions in the Amazon Connect Administrator Guide. contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts queued CONTACTS_QUEUED_BY_ENQUEUE Unit: Count Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts queued - (enqueue timestamp) CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: - Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect - Threshold: For ThresholdValue enter any whole number from 1 to 604800 (inclusive), in - seconds. For Comparison, you must enter LT (for \"Less than\"). UI name: Contacts resolved - in X CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, - Routing Profile, Agent, Agent Hierarchy, Feature, + (enqueue timestamp) CONTACTS_REMOVED_FROM_QUEUE_IN_X Unit: Count Valid groupings and + filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter + any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter + LT (for \"Less than\"). 
UI name: This metric is not available in Amazon Connect admin + website. CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, + Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For + ThresholdValue enter any whole number from 1 to 604800 (inclusive), in seconds. For + Comparison, you must enter LT (for \"Less than\"). UI name: Contacts resolved in X + CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, Routing + Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in + Connect UI name: Contacts transferred out Feature is a valid filter but not a valid + grouping. CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out - Feature is a valid filter but not a valid grouping. CONTACTS_TRANSFERRED_OUT_BY_AGENT - Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent - Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts - transferred out by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings - and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out queue CURRENT_CASES Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases FLOWS_OUTCOME Unit: @@ -5732,19 +5763,21 @@ definitions in the Amazon Connect Administrator Guide. 
PERCENT_CASES_FIRST_CONTACT_RESOLVED Unit: Percent Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved on first contact PERCENT_CONTACTS_STEP_EXPIRED Unit: Percent Valid groupings and filters: - Queue, RoutingStepExpression UI name: Not available PERCENT_CONTACTS_STEP_JOINED Unit: - Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: Not available - PERCENT_FLOWS_OUTCOME Unit: Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid - groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows - module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome - type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows - outcome percentage. The FLOWS_OUTCOME_TYPE is not a valid grouping. - PERCENT_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens - conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, - Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in - Connect UI name: Non-talk time percent PERCENT_TALK_TIME This metric is available only - for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid - groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but + not on the Historical Metrics UI. PERCENT_CONTACTS_STEP_JOINED Unit: Percent Valid + groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in + Real-time Metrics UI but not on the Historical Metrics UI. 
PERCENT_FLOWS_OUTCOME Unit: + Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid groupings and filters: Channel, + contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next + resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, + Initiation method, Resource published timestamp UI name: Flows outcome percentage. The + FLOWS_OUTCOME_TYPE is not a valid grouping. PERCENT_NON_TALK_TIME This metric is + available only for contacts analyzed by Contact Lens conversational analytics. Unit: + Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent + Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Non-talk time + percent PERCENT_TALK_TIME This metric is available only for contacts analyzed by Contact + Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, + Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Talk time percent PERCENT_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, @@ -5762,27 +5795,40 @@ definitions in the Amazon Connect Administrator Guide. Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for \"Less than\"). UI name: Service level X STEP_CONTACTS_QUEUED Unit: Count Valid - groupings and filters: Queue, RoutingStepExpression UI name: Not available - SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, - Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: After contact work time - SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: INITIATION_METHOD. 
This - metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | - CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent - Hierarchy UI name: Agent API connecting time The Negate key in Metric Level Filters is - not applicable for this metric. SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and - filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: - Contact flow time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: - Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent on contact time - SUM_CONTACTS_DISCONNECTED Valid metric filter key: DISCONNECT_REASON Unit: Count Valid - groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in + Real-time Metrics UI but not on the Historical Metrics UI. SUM_AFTER_CONTACT_WORK_TIME + Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent + Hierarchy, Q in Connect UI name: After contact work time SUM_CONNECTING_TIME_AGENT Unit: + Seconds Valid metric filter key: INITIATION_METHOD. This metric only supports the following + filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and + filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent API + connecting time The Negate key in Metric Level Filters is not applicable for this metric. 
+ SUM_CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| Incoming | + Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: + Contact abandoned SUM_CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: + Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect + Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in + seconds. For Comparison, you must enter LT (for \"Less than\"). UI name: Contacts + abandoned in X seconds SUM_CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and + filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in + Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), + in seconds. For Comparison, you must enter LT (for \"Less than\"). 
UI name: Contacts + answered in X seconds SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact flow + time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, + Agent, Agent Hierarchy UI name: Agent on contact time SUM_CONTACTS_DISCONNECTED Valid + metric filter key: DISCONNECT_REASON Unit: Count Valid groupings and filters: Queue, + Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contact disconnected - SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Queue, Channel, - Routing Profile, Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: - Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent - Hierarchy, Q in Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid + SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, + Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in - Connect UI name: Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings - and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time + Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid groupings and + filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: + Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings and filters: + Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time SUM_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Agent interaction and hold time SUM_INTERACTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, @@ -5999,7 +6045,20 @@ 
end Imports a claimed phone number from an external service, such as Amazon Pinpoint, into an Amazon Connect instance. You can call this API only in the same Amazon Web Services Region -where the Amazon Connect instance was created. +where the Amazon Connect instance was created. Call the DescribePhoneNumber API to verify +the status of a previous ImportPhoneNumber operation. If you plan to claim or import +numbers and then release numbers frequently, contact us for a service quota exception. +Otherwise, it is possible you will be blocked from claiming and releasing any more numbers +until up to 180 days past the oldest number released has expired. By default you can +claim or import and then release up to 200% of your maximum number of active phone numbers. +If you claim or import and then release phone numbers using the UI or API during a rolling +180 day cycle that exceeds 200% of your phone number service level quota, you will be +blocked from claiming or importing any more numbers until 180 days past the oldest number +released has expired. For example, if you already have 99 claimed or imported numbers and +a service level quota of 99 phone numbers, and in any 180 day period you release 99, claim +99, and then release 99, you will have exceeded the 200% limit. At that point you are +blocked from claiming any more numbers until you open an Amazon Web Services Support +ticket. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -6180,6 +6239,48 @@ function list_approved_origins( ) end +""" + list_authentication_profiles(instance_id) + list_authentication_profiles(instance_id, params::Dict{String,<:Any}) + +This API is in preview release for Amazon Connect and is subject to change. To request +access to this API, contact Amazon Web Services Support. Provides summary information about +the authentication profiles in a specified Amazon Connect instance. 
+ +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +""" +function list_authentication_profiles( + InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "GET", + "/authentication-profiles-summary/$(InstanceId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_authentication_profiles( + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "GET", + "/authentication-profiles-summary/$(InstanceId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_bots(instance_id, lex_version) list_bots(instance_id, lex_version, params::Dict{String,<:Any}) @@ -9054,13 +9155,13 @@ end start_attached_file_upload(file_name, file_size_in_bytes, file_use_case_type, instance_id, associated_resource_arn, params::Dict{String,<:Any}) Provides a pre-signed Amazon S3 URL in response for uploading your content. You may only -use this API to upload attachments to a Connect Case. +use this API to upload attachments to an Amazon Connect Case. # Arguments - `file_name`: A case-sensitive name of the attached file being uploaded. - `file_size_in_bytes`: The size of the attached file in bytes. - `file_use_case_type`: The use case for the file. -- `instance_id`: The unique identifier of the Connect instance. +- `instance_id`: The unique identifier of the Amazon Connect instance. - `associated_resource_arn`: The resource to which the attached file is (being) uploaded to. 
Cases are the only current supported resource. This value must be a valid ARN. @@ -9141,8 +9242,9 @@ throttling returns a TooManyRequests exception. The quota for concurrent activ exceeded. Active chat throttling returns a LimitExceededException. If you use the ChatDurationInMinutes parameter and receive a 400 error, your account may not support the ability to configure custom chat durations. For more information, contact Amazon Web -Services Support. For more information about chat, see Chat in the Amazon Connect -Administrator Guide. +Services Support. For more information about chat, see the following topics in the Amazon +Connect Administrator Guide: Concepts: Web and mobile messaging capabilities in Amazon +Connect Amazon Connect Chat security best practices # Arguments - `contact_flow_id`: The identifier of the flow for initiating the chat. To see the @@ -9384,7 +9486,9 @@ end Initiates real-time message streaming for a new chat contact. For more information about message streaming, see Enable real-time chat message streaming in the Amazon Connect -Administrator Guide. +Administrator Guide. For more information about chat, see the following topics in the +Amazon Connect Administrator Guide: Concepts: Web and mobile messaging capabilities in +Amazon Connect Amazon Connect Chat security best practices # Arguments - `chat_streaming_configuration`: The streaming configuration, such as the Amazon SNS @@ -9674,8 +9778,8 @@ Amazon Connect instance (specified as InstanceId). # Arguments - `contact_flow_id`: The identifier of the flow for the call. To see the ContactFlowId in - the Amazon Connect admin website, on the navigation menu go to Routing, Contact Flows. - Choose the flow. On the flow page, under the name of the flow, choose Show additional flow + the Amazon Connect admin website, on the navigation menu go to Routing, Flows. Choose the + flow. On the flow page, under the name of the flow, choose Show additional flow information. 
The ContactFlowId is the last part of the ARN, shown here in bold: arn:aws:connect:us-west-2:xxxxxxxxxxxx:instance/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/contact -flow/846ec553-a005-41c0-8341-xxxxxxxxxxxx @@ -10317,6 +10421,61 @@ function update_agent_status( ) end +""" + update_authentication_profile(authentication_profile_id, instance_id) + update_authentication_profile(authentication_profile_id, instance_id, params::Dict{String,<:Any}) + +This API is in preview release for Amazon Connect and is subject to change. To request +access to this API, contact Amazon Web Services Support. Updates the selected +authentication profile. + +# Arguments +- `authentication_profile_id`: A unique identifier for the authentication profile. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AllowedIps"`: A list of IP address range strings that are allowed to access the + instance. For more information on how to configure IP addresses, seeConfigure session + timeouts in the Amazon Connect Administrator Guide. +- `"BlockedIps"`: A list of IP address range strings that are blocked from accessing the + instance. For more information on how to configure IP addresses, For more information on + how to configure IP addresses, see Configure IP-based access control in the Amazon Connect + Administrator Guide. +- `"Description"`: The description for the authentication profile. +- `"Name"`: The name for the authentication profile. +- `"PeriodicSessionDuration"`: The short lived session duration configuration for users + logged in to Amazon Connect, in minutes. This value determines the maximum possible time + before an agent is authenticated. 
For more information, see Configure session timeouts in the
+  Amazon Connect Administrator Guide.
+"""
+function update_authentication_profile(
+    AuthenticationProfileId, InstanceId; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return connect(
+        "POST",
+        "/authentication-profiles/$(InstanceId)/$(AuthenticationProfileId)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function update_authentication_profile(
+    AuthenticationProfileId,
+    InstanceId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return connect(
+        "POST",
+        "/authentication-profiles/$(InstanceId)/$(AuthenticationProfileId)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     update_contact(contact_id, instance_id)
     update_contact(contact_id, instance_id, params::Dict{String,<:Any})
diff --git a/src/services/controltower.jl b/src/services/controltower.jl
index d5f92b0b2..c7e452f05 100644
--- a/src/services/controltower.jl
+++ b/src/services/controltower.jl
@@ -144,10 +144,10 @@ end
     disable_control(control_identifier, target_identifier)
     disable_control(control_identifier, target_identifier, params::Dict{String,<:Any})
 
-This API call turns off a control. It starts an asynchronous operation that deletes AWS
-resources on the specified organizational unit and the accounts it contains. The resources
-will vary according to the control that you specify. For usage examples, see the Amazon
-Web Services Control Tower User Guide .
+This API call turns off a control. It starts an asynchronous operation that deletes Amazon
+Web Services resources on the specified organizational unit and the accounts it contains.
+The resources will vary according to the control that you specify. For usage examples, see
+the Controls Reference Guide .
 
 # Arguments
 - `control_identifier`: The ARN of the control. 
Only Strongly recommended and Elective @@ -265,7 +265,7 @@ end This API call activates a control. It starts an asynchronous operation that creates Amazon Web Services resources on the specified organizational unit and the accounts it contains. The resources created will vary according to the control that you specify. For usage -examples, see the Amazon Web Services Control Tower User Guide . +examples, see the Controls Reference Guide . # Arguments - `control_identifier`: The ARN of the control. Only Strongly recommended and Elective @@ -406,7 +406,7 @@ end Returns the status of a particular EnableControl or DisableControl operation. Displays a message in case of error. Details for an operation are available for 90 days. For usage -examples, see the Amazon Web Services Control Tower User Guide . +examples, see the Controls Reference Guide . # Arguments - `operation_identifier`: The ID of the asynchronous operation, which is used to track @@ -490,8 +490,8 @@ end get_enabled_control(enabled_control_identifier) get_enabled_control(enabled_control_identifier, params::Dict{String,<:Any}) -Retrieves details about an enabled control. For usage examples, see the Amazon Web -Services Control Tower User Guide . +Retrieves details about an enabled control. For usage examples, see the Controls Reference +Guide . # Arguments - `enabled_control_identifier`: The controlIdentifier of the enabled control. @@ -644,7 +644,8 @@ end list_control_operations() list_control_operations(params::Dict{String,<:Any}) -Provides a list of operations in progress or queued. +Provides a list of operations in progress or queued. For usage examples, see +ListControlOperation examples. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -713,12 +714,12 @@ end list_enabled_controls(params::Dict{String,<:Any}) Lists the controls enabled by Amazon Web Services Control Tower on the specified -organizational unit and the accounts it contains. 
For usage examples, see the Amazon Web -Services Control Tower User Guide . +organizational unit and the accounts it contains. For usage examples, see the Controls +Reference Guide . # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"filter"`: An input filter for the ListCEnabledControls API that lets you select the +- `"filter"`: An input filter for the ListEnabledControls API that lets you select the types of control operations to view. - `"maxResults"`: How many results to return per API call. - `"nextToken"`: The token to continue the list from a previous API call with the same @@ -746,6 +747,41 @@ function list_enabled_controls( ) end +""" + list_landing_zone_operations() + list_landing_zone_operations(params::Dict{String,<:Any}) + +Lists all landing zone operations from the past 90 days. Results are sorted by time, with +the most recent operation first. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filter"`: An input filter for the ListLandingZoneOperations API that lets you select + the types of landing zone operations to view. +- `"maxResults"`: How many results to return per API call. +- `"nextToken"`: The token to continue the list from a previous API call with the same + parameters. 
+""" +function list_landing_zone_operations(; aws_config::AbstractAWSConfig=global_aws_config()) + return controltower( + "POST", + "/list-landingzone-operations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_landing_zone_operations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return controltower( + "POST", + "/list-landingzone-operations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_landing_zones() list_landing_zones(params::Dict{String,<:Any}) @@ -781,8 +817,8 @@ end list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) -Returns a list of tags associated with the resource. For usage examples, see the Amazon -Web Services Control Tower User Guide . +Returns a list of tags associated with the resource. For usage examples, see the Controls +Reference Guide . # Arguments - `resource_arn`: The ARN of the resource. @@ -861,7 +897,10 @@ end reset_landing_zone(landing_zone_identifier, params::Dict{String,<:Any}) This API call resets a landing zone. It starts an asynchronous operation that resets the -landing zone to the parameters specified in its original configuration. +landing zone to the parameters specified in the original configuration, which you specified +in the manifest file. Nothing in the manifest file's original landing zone configuration is +changed during the reset process, by default. This API is not the same as a rollback of a +landing zone version, which is not a supported operation. # Arguments - `landing_zone_identifier`: The unique identifier of the landing zone. @@ -902,8 +941,7 @@ end tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) -Applies tags to a resource. For usage examples, see the Amazon Web Services Control Tower -User Guide . +Applies tags to a resource. For usage examples, see the Controls Reference Guide . 
# Arguments - `resource_arn`: The ARN of the resource to be tagged. @@ -938,8 +976,7 @@ end untag_resource(resource_arn, tag_keys) untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) -Removes tags from a resource. For usage examples, see the Amazon Web Services Control -Tower User Guide . +Removes tags from a resource. For usage examples, see the Controls Reference Guide . # Arguments - `resource_arn`: The ARN of the resource. @@ -1036,11 +1073,11 @@ end EnablementStatus of SUCCEEDED, supply parameters that are different from the currently configured parameters. Otherwise, Amazon Web Services Control Tower will not accept the request. If the enabled control shows an EnablementStatus of FAILED, Amazon Web Services -Control Tower will update the control to match any valid parameters that you supply. If the +Control Tower updates the control to match any valid parameters that you supply. If the DriftSummary status for the control shows as DRIFTED, you cannot call this API. Instead, you can update the control by calling DisableControl and again calling EnableControl, or -you can run an extending governance operation. For usage examples, see the Amazon Web -Services Control Tower User Guide +you can run an extending governance operation. For usage examples, see the Controls +Reference Guide . # Arguments - `enabled_control_identifier`: The ARN of the enabled control that will be updated. @@ -1095,8 +1132,10 @@ specified in the updated manifest file. # Arguments - `landing_zone_identifier`: The unique identifier of the landing zone. -- `manifest`: The manifest JSON file is a text file that describes your Amazon Web Services - resources. For examples, review Launch your landing zone. +- `manifest`: The manifest file (JSON) is a text file that describes your Amazon Web + Services resources. For an example, review Launch your landing zone. The example manifest + file contains each of the available parameters. 
The schema for the landing zone's JSON + manifest file is not published, by design. - `version`: The landing zone version, for example, 3.2. """ diff --git a/src/services/datazone.jl b/src/services/datazone.jl index 522ae7d8a..95a5786ba 100644 --- a/src/services/datazone.jl +++ b/src/services/datazone.jl @@ -2751,6 +2751,48 @@ function get_iam_portal_login_url( ) end +""" + get_lineage_node(domain_identifier, identifier) + get_lineage_node(domain_identifier, identifier, params::Dict{String,<:Any}) + +Gets the data lineage node. + +# Arguments +- `domain_identifier`: The ID of the domain in which you want to get the data lineage node. +- `identifier`: The ID of the data lineage node that you want to get. Both, a lineage node + identifier generated by Amazon DataZone and a sourceIdentifier of the lineage node are + supported. If sourceIdentifier is greater than 1800 characters, you can use lineage node + identifier generated by Amazon DataZone to get the node details. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"timestamp"`: The event time stamp for which you want to get the data lineage node. 
+""" +function get_lineage_node( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/lineage/nodes/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_lineage_node( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/lineage/nodes/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_listing(domain_identifier, identifier) get_listing(domain_identifier, identifier, params::Dict{String,<:Any}) @@ -3602,6 +3644,62 @@ function list_environments( ) end +""" + list_lineage_node_history(domain_identifier, identifier) + list_lineage_node_history(domain_identifier, identifier, params::Dict{String,<:Any}) + +Lists the history of the specified data lineage node. + +# Arguments +- `domain_identifier`: The ID of the domain where you want to list the history of the + specified data lineage node. +- `identifier`: The ID of the data lineage node whose history you want to list. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"direction"`: The direction of the data lineage node refers to the lineage node having + neighbors in that direction. For example, if direction is UPSTREAM, the + ListLineageNodeHistory API responds with historical versions with upstream neighbors only. +- `"maxResults"`: The maximum number of history items to return in a single call to + ListLineageNodeHistory. When the number of memberships to be listed is greater than the + value of MaxResults, the response contains a NextToken value that you can use in a + subsequent call to ListLineageNodeHistory to list the next set of items. 
+- `"nextToken"`: When the number of history items is greater than the default value for the + MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than + the number of items, the response includes a pagination token named NextToken. You can + specify this NextToken value in a subsequent call to ListLineageNodeHistory to list the + next set of items. +- `"sortOrder"`: The order by which you want data lineage node history to be sorted. +- `"timestampGTE"`: Specifies whether the action is to return data lineage node history + from the time after the event timestamp. +- `"timestampLTE"`: Specifies whether the action is to return data lineage node history + from the time prior of the event timestamp. +""" +function list_lineage_node_history( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/lineage/nodes/$(identifier)/history"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_lineage_node_history( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/lineage/nodes/$(identifier)/history", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_metadata_generation_runs(domain_identifier) list_metadata_generation_runs(domain_identifier, params::Dict{String,<:Any}) @@ -4102,6 +4200,54 @@ function list_time_series_data_points( ) end +""" + post_lineage_event(domain_identifier, event) + post_lineage_event(domain_identifier, event, params::Dict{String,<:Any}) + +Posts a data lineage event. + +# Arguments +- `domain_identifier`: The ID of the domain where you want to post a data lineage event. +- `event`: The data lineage event that you want to post. Only open-lineage run event are + supported as events. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. +""" +function post_lineage_event( + domainIdentifier, event; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/lineage/events", + Dict{String,Any}("event" => event, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function post_lineage_event( + domainIdentifier, + event, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/lineage/events", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("event" => event, "clientToken" => string(uuid4())), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ post_time_series_data_points(domain_identifier, entity_identifier, entity_type, forms) post_time_series_data_points(domain_identifier, entity_identifier, entity_type, forms, params::Dict{String,<:Any}) diff --git a/src/services/direct_connect.jl b/src/services/direct_connect.jl index 2fe23f907..1fb6be135 100644 --- a/src/services/direct_connect.jl +++ b/src/services/direct_connect.jl @@ -69,7 +69,7 @@ end allocate_connection_on_interconnect(bandwidth, connection_name, interconnect_id, owner_account, vlan) allocate_connection_on_interconnect(bandwidth, connection_name, interconnect_id, owner_account, vlan, params::Dict{String,<:Any}) -Deprecated. Use AllocateHostedConnection instead. Creates a hosted connection on an + Deprecated. Use AllocateHostedConnection instead. Creates a hosted connection on an interconnect. Allocates a VLAN number and a specified amount of bandwidth for use by a hosted connection on the specified interconnect. 
Intended for use by Direct Connect Partners only. @@ -149,9 +149,9 @@ Intended for use by Direct Connect Partners only. # Arguments - `bandwidth`: The bandwidth of the connection. The possible values are 50Mbps, 100Mbps, - 200Mbps, 300Mbps, 400Mbps, 500Mbps, 1Gbps, 2Gbps, 5Gbps, and 10Gbps. Note that only those - Direct Connect Partners who have met specific requirements are allowed to create a 1Gbps, - 2Gbps, 5Gbps or 10Gbps hosted connection. + 200Mbps, 300Mbps, 400Mbps, 500Mbps, 1Gbps, 2Gbps, 5Gbps, 10Gbps, and 25Gbps. Note that only + those Direct Connect Partners who have met specific requirements are allowed to create a + 1Gbps, 2Gbps, 5Gbps, 10Gbps, or 25Gbps hosted connection. - `connection_id`: The ID of the interconnect or LAG. - `connection_name`: The name of the hosted connection. - `owner_account`: The ID of the Amazon Web Services account ID of the customer for the @@ -1098,7 +1098,7 @@ the VLAN assigned to them by the Direct Connect Partner. Intended for use by Di Connect Partners only. # Arguments -- `bandwidth`: The port bandwidth, in Gbps. The possible values are 1 and 10. +- `bandwidth`: The port bandwidth, in Gbps. The possible values are 1, 10, and 100. - `interconnect_name`: The name of the interconnect. - `location`: The location of the interconnect. @@ -1155,28 +1155,28 @@ Creates a link aggregation group (LAG) with the specified number of bundled phys dedicated connections between the customer network and a specific Direct Connect location. A LAG is a logical interface that uses the Link Aggregation Control Protocol (LACP) to aggregate multiple interfaces, enabling you to treat them as a single interface. All -connections in a LAG must use the same bandwidth (either 1Gbps or 10Gbps) and must -terminate at the same Direct Connect endpoint. You can have up to 10 dedicated connections -per LAG. Regardless of this limit, if you request more connections for the LAG than Direct -Connect can allocate on a single endpoint, no LAG is created. 
You can specify an existing
-physical dedicated connection or interconnect to include in the LAG (which counts towards
-the total number of connections). Doing so interrupts the current physical dedicated
-connection, and re-establishes them as a member of the LAG. The LAG will be created on the
-same Direct Connect endpoint to which the dedicated connection terminates. Any virtual
-interfaces associated with the dedicated connection are automatically disassociated and
-re-associated with the LAG. The connection ID does not change. If the Amazon Web Services
-account used to create a LAG is a registered Direct Connect Partner, the LAG is
-automatically enabled to host sub-connections. For a LAG owned by a partner, any associated
-virtual interfaces cannot be directly configured.
+connections in a LAG must use the same bandwidth (either 1Gbps, 10Gbps, 100Gbps, or
+400Gbps) and must terminate at the same Direct Connect endpoint. You can have up to 10
+dedicated connections per location. Regardless of this limit, if you request more
+connections for the LAG than Direct Connect can allocate on a single endpoint, no LAG is
+created. You can specify an existing physical dedicated connection or interconnect to
+include in the LAG (which counts towards the total number of connections). Doing so
+interrupts the current physical dedicated connection, and re-establishes them as a member
+of the LAG. The LAG will be created on the same Direct Connect endpoint to which the
+dedicated connection terminates. Any virtual interfaces associated with the dedicated
+connection are automatically disassociated and re-associated with the LAG. The connection
+ID does not change. If the Amazon Web Services account used to create a LAG is a registered
+Direct Connect Partner, the LAG is automatically enabled to host sub-connections. For a LAG
+owned by a partner, any associated virtual interfaces cannot be directly configured. 
# Arguments - `connections_bandwidth`: The bandwidth of the individual physical dedicated connections - bundled by the LAG. The possible values are 1Gbps and 10Gbps. + bundled by the LAG. The possible values are 1Gbps,10Gbps, 100Gbps, and 400Gbps. - `lag_name`: The name of the LAG. - `location`: The location for the LAG. - `number_of_connections`: The number of physical dedicated connections initially provisioned and bundled by the LAG. You can have a maximum of four connections when the - port speed is 1G or 10G, or two when the port speed is 100G. + port speed is 1Gbps or 10Gbps, or two when the port speed is 100Gbps or 400Gbps. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1245,7 +1245,7 @@ gateway or a Virtual Private Gateway (VGW). Connecting the private virtual inter Direct Connect gateway enables the possibility for connecting to multiple VPCs, including VPCs in different Amazon Web Services Regions. Connecting the private virtual interface to a VGW only provides access to a single VPC within the same Region. Setting the MTU of a -virtual interface to 9001 (jumbo frames) can cause an update to the underlying physical +virtual interface to 8500 (jumbo frames) can cause an update to the underlying physical connection if it wasn't updated to support jumbo frames. Updating the connection disrupts network connectivity for all virtual interfaces associated with the connection for up to 30 seconds. To check whether your connection supports jumbo frames, call DescribeConnections. @@ -1691,7 +1691,7 @@ end describe_connection_loa(connection_id) describe_connection_loa(connection_id, params::Dict{String,<:Any}) -Deprecated. Use DescribeLoa instead. Gets the LOA-CFA for a connection. The Letter of + Deprecated. Use DescribeLoa instead. Gets the LOA-CFA for a connection. 
The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that your APN partner or service provider uses when establishing your cross connect to Amazon Web Services at the colocation facility. For more information, see Requesting Cross Connects at @@ -1763,7 +1763,7 @@ end describe_connections_on_interconnect(interconnect_id) describe_connections_on_interconnect(interconnect_id, params::Dict{String,<:Any}) -Deprecated. Use DescribeHostedConnections instead. Lists the connections that have been + Deprecated. Use DescribeHostedConnections instead. Lists the connections that have been provisioned on the specified interconnect. Intended for use by Direct Connect Partners only. @@ -2017,7 +2017,7 @@ end describe_interconnect_loa(interconnect_id) describe_interconnect_loa(interconnect_id, params::Dict{String,<:Any}) -Deprecated. Use DescribeLoa instead. Gets the LOA-CFA for the specified interconnect. The + Deprecated. Use DescribeLoa instead. Gets the LOA-CFA for the specified interconnect. The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that is used when establishing your cross connect to Amazon Web Services at the colocation facility. For more information, see Requesting Cross Connects at Direct Connect Locations @@ -2252,8 +2252,10 @@ end describe_virtual_gateways() describe_virtual_gateways(params::Dict{String,<:Any}) -Lists the virtual private gateways owned by the Amazon Web Services account. You can create -one or more Direct Connect private virtual interfaces linked to a virtual private gateway. + Deprecated. Use DescribeVpnGateways instead. See DescribeVPNGateways in the Amazon Elastic +Compute Cloud API Reference. Lists the virtual private gateways owned by the Amazon Web +Services account. You can create one or more Direct Connect private virtual interfaces +linked to a virtual private gateway. 
""" function describe_virtual_gateways(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -2773,7 +2775,7 @@ end update_virtual_interface_attributes(virtual_interface_id, params::Dict{String,<:Any}) Updates the specified attributes of the specified virtual private interface. Setting the -MTU of a virtual interface to 9001 (jumbo frames) can cause an update to the underlying +MTU of a virtual interface to 8500 (jumbo frames) can cause an update to the underlying physical connection if it wasn't updated to support jumbo frames. Updating the connection disrupts network connectivity for all virtual interfaces associated with the connection for up to 30 seconds. To check whether your connection supports jumbo frames, call @@ -2787,7 +2789,7 @@ DescribeVirtualInterfaces. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"enableSiteLink"`: Indicates whether to enable or disable SiteLink. - `"mtu"`: The maximum transmission unit (MTU), in bytes. The supported values are 1500 and - 9001. The default value is 1500. + 8500. The default value is 1500. - `"virtualInterfaceName"`: The name of the virtual private interface. """ function update_virtual_interface_attributes( diff --git a/src/services/ec2.jl b/src/services/ec2.jl index 505cf5fd8..ea920bdbb 100644 --- a/src/services/ec2.jl +++ b/src/services/ec2.jl @@ -4108,10 +4108,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"PreserveClientIp"`: Indicates whether your client's IP address is preserved as the - source. The value is true or false. If true, your client's IP address is used when you - connect to a resource. 
If false, the elastic network interface IP address is used when - you connect to a resource. Default: true +- `"PreserveClientIp"`: Indicates whether the client IP address is preserved as the source. + The following are the possible values. true - Use the client IP address as the source. + false - Use the network interface IP address as the source. Default: false - `"SecurityGroupId"`: One or more security groups to associate with the endpoint. If you don't specify a security group, the default security group for your VPC will be associated with the endpoint. @@ -7549,7 +7548,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys enable Multi-Attach, you can attach the volume to up to 16 Instances built on the Nitro System in the same Availability Zone. This parameter is supported with io1 and io2 volumes only. For more information, see Amazon EBS Multi-Attach in the Amazon EBS User Guide. -- `"OutpostArn"`: The Amazon Resource Name (ARN) of the Outpost. +- `"OutpostArn"`: The Amazon Resource Name (ARN) of the Outpost on which to create the + volume. If you intend to use a volume with an instance running on an outpost, then you must + create the volume on the same outpost as the instance. You can't use a volume created in an + Amazon Web Services Region with an instance on an Amazon Web Services outpost, or the other + way around. - `"Size"`: The size of the volume, in GiBs. You must specify either a snapshot ID or a volume size. If you specify a snapshot, the default is the snapshot size. You can specify a volume size that is equal to or larger than the snapshot size. The following are the @@ -18215,7 +18218,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys assigned a tag with a specific key, regardless of the tag value. volume-id - The volume ID. volume-type - The Amazon EBS volume type (gp2 | gp3 | io1 | io2 | st1 | sc1| standard) -- `"VolumeId"`: The volume IDs. +- `"VolumeId"`: The volume IDs. 
If not specified, then all volumes are included in the + response. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -25306,9 +25310,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Virtual Function interface for the instance. There is no way to disable enhanced networking with the Intel 82599 Virtual Function interface at this time. This option is supported only for HVM instances. Specifying this option with a PV instance can make it unreachable. -- `"userData"`: Changes the instance's user data to the specified value. If you are using - an Amazon Web Services SDK or command line tool, base64-encoding is performed for you, and - you can load the text from a file. Otherwise, you must provide base64-encoded text. +- `"userData"`: Changes the instance's user data to the specified value. User data must be + base64-encoded. Depending on the tool or SDK that you're using, the base64-encoding might + be performed for you. For more information, see Work with instance user data. - `"value"`: A new value for the attribute. Use only with the kernel, ramdisk, userData, disableApiTermination, or instanceInitiatedShutdownBehavior attribute. """ @@ -30951,11 +30955,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys launch. You can specify tags for the following resources only: Instances Volumes Spot Instance requests Network interfaces To tag a resource after it has been created, see CreateTags. -- `"UserData"`: The user data script to make available to the instance. For more - information, see Run commands on your Amazon EC2 instance at launch in the Amazon EC2 User - Guide. If you are using a command line tool, base64-encoding is performed for you, and you - can load the text from a file. 
Otherwise, you must provide base64-encoded text. User data - is limited to 16 KB. +- `"UserData"`: The user data to make available to the instance. User data must be + base64-encoded. Depending on the tool or SDK that you're using, the base64-encoding might + be performed for you. For more information, see Work with instance user data. - `"additionalInfo"`: Reserved. - `"clientToken"`: Unique, case-sensitive identifier you provide to ensure the idempotency of the request. If you do not specify a client token, a randomly generated token is used diff --git a/src/services/eks.jl b/src/services/eks.jl index 0c0fe2861..010cf3660 100644 --- a/src/services/eks.jl +++ b/src/services/eks.jl @@ -394,6 +394,10 @@ Launching Amazon EKS nodes in the Amazon EKS User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"accessConfig"`: The access configuration for the cluster. +- `"bootstrapSelfManagedAddons"`: If you set this value to False when creating a cluster, + the default networking add-ons will not be installed. The default networking addons include + vpc-cni, coredns, and kube-proxy. Use this option when you plan to install third-party + alternative add-ons or self-manage the default networking add-ons. - `"clientRequestToken"`: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. - `"encryptionConfig"`: The encryption configuration for the cluster. diff --git a/src/services/glue.jl b/src/services/glue.jl index 381075a80..7aba9bc4a 100644 --- a/src/services/glue.jl +++ b/src/services/glue.jl @@ -4194,6 +4194,8 @@ Retrieves all databases defined in a given Data Catalog. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AttributesToGet"`: Specifies the database fields returned by the GetDatabases call. + This parameter doesn’t accept an empty list. The request must include the NAME. 
- `"CatalogId"`: The ID of the Data Catalog from which to retrieve Databases. If none is provided, the Amazon Web Services account ID is used by default. - `"MaxResults"`: The maximum number of databases to return in one response. diff --git a/src/services/ivs_realtime.jl b/src/services/ivs_realtime.jl index 8785c3e1d..c298347d4 100644 --- a/src/services/ivs_realtime.jl +++ b/src/services/ivs_realtime.jl @@ -100,8 +100,8 @@ Creates a new stage (and optionally participant tokens). # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"autoParticipantRecordingConfiguration"`: Auto participant recording configuration - object attached to the stage. +- `"autoParticipantRecordingConfiguration"`: Configuration object for individual + participant recording, to attach to the new stage. - `"name"`: Optional name that can be specified for the stage being created. - `"participantTokenConfigurations"`: Array of participant token configuration objects to attach to the new stage. @@ -203,6 +203,38 @@ function delete_encoder_configuration( ) end +""" + delete_public_key(arn) + delete_public_key(arn, params::Dict{String,<:Any}) + +Deletes the specified public key used to sign stage participant tokens. This invalidates +future participant tokens generated using the key pair’s private key. + +# Arguments +- `arn`: ARN of the public key to be deleted. 
+ +""" +function delete_public_key(arn; aws_config::AbstractAWSConfig=global_aws_config()) + return ivs_realtime( + "POST", + "/DeletePublicKey", + Dict{String,Any}("arn" => arn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_public_key( + arn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/DeletePublicKey", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("arn" => arn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_stage(arn) delete_stage(arn, params::Dict{String,<:Any}) @@ -434,6 +466,37 @@ function get_participant( ) end +""" + get_public_key(arn) + get_public_key(arn, params::Dict{String,<:Any}) + +Gets information for the specified public key. + +# Arguments +- `arn`: ARN of the public key for which the information is to be retrieved. + +""" +function get_public_key(arn; aws_config::AbstractAWSConfig=global_aws_config()) + return ivs_realtime( + "POST", + "/GetPublicKey", + Dict{String,Any}("arn" => arn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_public_key( + arn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/GetPublicKey", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("arn" => arn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_stage(arn) get_stage(arn, params::Dict{String,<:Any}) @@ -539,6 +602,52 @@ function get_storage_configuration( ) end +""" + import_public_key(public_key_material) + import_public_key(public_key_material, params::Dict{String,<:Any}) + +Import a public key to be used for signing stage participant tokens. + +# Arguments +- `public_key_material`: The content of the public key to be imported. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"name"`: Name of the public key to be imported. +- `"tags"`: Tags attached to the resource. Array of maps, each of the form string:string + (key:value). See Tagging AWS Resources for details, including restrictions that apply to + tags and \"Tag naming limits and requirements\"; Amazon IVS has no constraints on tags + beyond what is documented there. +""" +function import_public_key( + publicKeyMaterial; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/ImportPublicKey", + Dict{String,Any}("publicKeyMaterial" => publicKeyMaterial); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function import_public_key( + publicKeyMaterial, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ivs_realtime( + "POST", + "/ImportPublicKey", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("publicKeyMaterial" => publicKeyMaterial), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_compositions() list_compositions(params::Dict{String,<:Any}) @@ -725,6 +834,36 @@ function list_participants( ) end +""" + list_public_keys() + list_public_keys(params::Dict{String,<:Any}) + +Gets summary information about all public keys in your account, in the AWS region where the +API request is processed. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: Maximum number of results to return. Default: 50. +- `"nextToken"`: The first public key to retrieve. This is used for pagination; see the + nextToken response field. 
+""" +function list_public_keys(; aws_config::AbstractAWSConfig=global_aws_config()) + return ivs_realtime( + "POST", "/ListPublicKeys"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_public_keys( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/ListPublicKeys", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_stage_sessions(stage_arn) list_stage_sessions(stage_arn, params::Dict{String,<:Any}) @@ -1049,9 +1188,9 @@ Updates a stage’s configuration. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"autoParticipantRecordingConfiguration"`: Auto-participant-recording configuration - object to attach to the stage. Auto-participant-recording configuration cannot be updated - while recording is active. +- `"autoParticipantRecordingConfiguration"`: Configuration object for individual + participant recording, to attach to the stage. Note that this cannot be updated while + recording is active. - `"name"`: Name of the stage to be updated. 
""" function update_stage(arn; aws_config::AbstractAWSConfig=global_aws_config()) diff --git a/src/services/kinesis_analytics_v2.jl b/src/services/kinesis_analytics_v2.jl index d8f45ba1f..832d11d6c 100644 --- a/src/services/kinesis_analytics_v2.jl +++ b/src/services/kinesis_analytics_v2.jl @@ -1012,6 +1012,52 @@ function describe_application( ) end +""" + describe_application_operation(application_name, operation_id) + describe_application_operation(application_name, operation_id, params::Dict{String,<:Any}) + +Returns information about a specific operation performed on a Managed Service for Apache +Flink application + +# Arguments +- `application_name`: +- `operation_id`: + +""" +function describe_application_operation( + ApplicationName, OperationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return kinesis_analytics_v2( + "DescribeApplicationOperation", + Dict{String,Any}( + "ApplicationName" => ApplicationName, "OperationId" => OperationId + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_application_operation( + ApplicationName, + OperationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kinesis_analytics_v2( + "DescribeApplicationOperation", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ApplicationName" => ApplicationName, "OperationId" => OperationId + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_application_snapshot(application_name, snapshot_name) describe_application_snapshot(application_name, snapshot_name, params::Dict{String,<:Any}) @@ -1164,6 +1210,50 @@ function discover_input_schema( ) end +""" + list_application_operations(application_name) + list_application_operations(application_name, params::Dict{String,<:Any}) + +Lists information about operations performed on a Managed Service for Apache Flink +application + +# Arguments +- `application_name`: + +# Optional 
Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Limit"`: +- `"NextToken"`: +- `"Operation"`: +- `"OperationStatus"`: +""" +function list_application_operations( + ApplicationName; aws_config::AbstractAWSConfig=global_aws_config() +) + return kinesis_analytics_v2( + "ListApplicationOperations", + Dict{String,Any}("ApplicationName" => ApplicationName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_application_operations( + ApplicationName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kinesis_analytics_v2( + "ListApplicationOperations", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("ApplicationName" => ApplicationName), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_application_snapshots(application_name) list_application_snapshots(application_name, params::Dict{String,<:Any}) @@ -1324,11 +1414,10 @@ end rollback_application(application_name, current_application_version_id, params::Dict{String,<:Any}) Reverts the application to the previous running version. You can roll back an application -if you suspect it is stuck in a transient status. You can roll back an application only if -it is in the UPDATING or AUTOSCALING status. When you rollback an application, it loads -state data from the last successful snapshot. If the application has no snapshots, Managed -Service for Apache Flink rejects the rollback request. This action is not supported for -Managed Service for Apache Flink for SQL applications. +if you suspect it is stuck in a transient status or in the running status. You can roll +back an application only if it is in the UPDATING, AUTOSCALING, or RUNNING statuses. When +you rollback an application, it loads state data from the last successful snapshot. If the +application has no snapshots, Managed Service for Apache Flink rejects the rollback request. 
# Arguments - `application_name`: The name of the application. diff --git a/src/services/mq.jl b/src/services/mq.jl index 69f6b4e24..8d8ef4ffb 100644 --- a/src/services/mq.jl +++ b/src/services/mq.jl @@ -5,8 +5,8 @@ using AWS.Compat using AWS.UUIDs """ - create_broker(auto_minor_version_upgrade, broker_name, deployment_mode, engine_type, engine_version, host_instance_type, publicly_accessible, users) - create_broker(auto_minor_version_upgrade, broker_name, deployment_mode, engine_type, engine_version, host_instance_type, publicly_accessible, users, params::Dict{String,<:Any}) + create_broker(broker_name, deployment_mode, engine_type, host_instance_type, publicly_accessible, users) + create_broker(broker_name, deployment_mode, engine_type, host_instance_type, publicly_accessible, users, params::Dict{String,<:Any}) Creates a broker. Note: This API is asynchronous. To create a broker, you must either use the AmazonMQFullAccess IAM policy or include the following EC2 permissions in your IAM @@ -21,10 +21,6 @@ Your Amazon Web Services Credentials and Never Modify or Delete the Amazon MQ El Network Interface in the Amazon MQ Developer Guide. # Arguments -- `auto_minor_version_upgrade`: Enables automatic upgrades to new minor versions for - brokers, as new versions are released and supported by Amazon MQ. Automatic upgrades occur - during the scheduled maintenance window of the broker or after a manual broker reboot. Set - to true by default, if no value is specified. - `broker_name`: Required. The broker's name. This value must be unique in your Amazon Web Services account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, and must not contain white spaces, brackets, wildcard characters, or special @@ -35,8 +31,6 @@ Network Interface in the Amazon MQ Developer Guide. - `deployment_mode`: Required. The broker's deployment mode. - `engine_type`: Required. The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ. 
-- `engine_version`: Required. The broker engine's version. For a list of supported engine - versions, see Supported engines. - `host_instance_type`: Required. The broker's instance type. - `publicly_accessible`: Enables connections from applications outside of the VPC that hosts the broker's subnets. Set to false by default, if no value is provided. @@ -49,6 +43,11 @@ Network Interface in the Amazon MQ Developer Guide. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"authenticationStrategy"`: Optional. The authentication strategy used to secure the broker. The default is SIMPLE. +- `"autoMinorVersionUpgrade"`: Enables automatic upgrades to new patch versions for brokers + as new versions are released and supported by Amazon MQ. Automatic upgrades occur during + the scheduled maintenance window or after a manual broker reboot. Set to true by default, + if no value is specified. Must be set to true for ActiveMQ brokers version 5.18 and above + and for RabbitMQ brokers version 3.13 and above. - `"configuration"`: A list of information about the configuration. - `"creatorRequestId"`: The unique ID that the requester receives for the created broker. Amazon MQ passes your ID with the API action. We recommend using a Universally Unique @@ -59,6 +58,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys that is used to replicate data from in a data replication pair, and is applied to the replica broker. Must be set when dataReplicationMode is set to CRDR. - `"encryptionOptions"`: Encryption options for the broker. +- `"engineVersion"`: The broker engine version. Defaults to the latest available version + for the specified broker engine type. For more information, see the ActiveMQ version + management and the RabbitMQ version management sections in the Amazon MQ Developer Guide. - `"ldapServerMetadata"`: Optional. 
The metadata of the LDAP server used to authenticate and authorize connections to the broker. Does not apply to RabbitMQ brokers. - `"logs"`: Enables Amazon CloudWatch logging for brokers. @@ -81,11 +83,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"tags"`: Create tags when creating the broker. """ function create_broker( - autoMinorVersionUpgrade, brokerName, deploymentMode, engineType, - engineVersion, hostInstanceType, publiclyAccessible, users; @@ -95,11 +95,9 @@ function create_broker( "POST", "/v1/brokers", Dict{String,Any}( - "autoMinorVersionUpgrade" => autoMinorVersionUpgrade, "brokerName" => brokerName, "deploymentMode" => deploymentMode, "engineType" => engineType, - "engineVersion" => engineVersion, "hostInstanceType" => hostInstanceType, "publiclyAccessible" => publiclyAccessible, "users" => users, @@ -110,11 +108,9 @@ function create_broker( ) end function create_broker( - autoMinorVersionUpgrade, brokerName, deploymentMode, engineType, - engineVersion, hostInstanceType, publiclyAccessible, users, @@ -128,11 +124,9 @@ function create_broker( mergewith( _merge, Dict{String,Any}( - "autoMinorVersionUpgrade" => autoMinorVersionUpgrade, "brokerName" => brokerName, "deploymentMode" => deploymentMode, "engineType" => engineType, - "engineVersion" => engineVersion, "hostInstanceType" => hostInstanceType, "publiclyAccessible" => publiclyAccessible, "users" => users, @@ -147,8 +141,8 @@ function create_broker( end """ - create_configuration(engine_type, engine_version, name) - create_configuration(engine_type, engine_version, name, params::Dict{String,<:Any}) + create_configuration(engine_type, name) + create_configuration(engine_type, name, params::Dict{String,<:Any}) Creates a new configuration for the specified configuration name. Amazon MQ uses the default configuration (the engine type and version). @@ -156,8 +150,6 @@ default configuration (the engine type and version). # Arguments - `engine_type`: Required. 
The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ. -- `engine_version`: Required. The broker engine's version. For a list of supported engine - versions, see Supported engines. - `name`: Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long. @@ -166,24 +158,24 @@ default configuration (the engine type and version). Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"authenticationStrategy"`: Optional. The authentication strategy associated with the configuration. The default is SIMPLE. +- `"engineVersion"`: The broker engine version. Defaults to the latest available version + for the specified broker engine type. For more information, see the ActiveMQ version + management and the RabbitMQ version management sections in the Amazon MQ Developer Guide. - `"tags"`: Create tags when creating the configuration. 
""" function create_configuration( - engineType, engineVersion, name; aws_config::AbstractAWSConfig=global_aws_config() + engineType, name; aws_config::AbstractAWSConfig=global_aws_config() ) return mq( "POST", "/v1/configurations", - Dict{String,Any}( - "engineType" => engineType, "engineVersion" => engineVersion, "name" => name - ); + Dict{String,Any}("engineType" => engineType, "name" => name); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function create_configuration( engineType, - engineVersion, name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -193,13 +185,7 @@ function create_configuration( "/v1/configurations", Dict{String,Any}( mergewith( - _merge, - Dict{String,Any}( - "engineType" => engineType, - "engineVersion" => engineVersion, - "name" => name, - ), - params, + _merge, Dict{String,Any}("engineType" => engineType, "name" => name), params ), ); aws_config=aws_config, @@ -861,13 +847,16 @@ Adds a pending configuration change to a broker. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"authenticationStrategy"`: Optional. The authentication strategy used to secure the broker. The default is SIMPLE. -- `"autoMinorVersionUpgrade"`: Enables automatic upgrades to new minor versions for - brokers, as new versions are released and supported by Amazon MQ. Automatic upgrades occur - during the scheduled maintenance window of the broker or after a manual broker reboot. +- `"autoMinorVersionUpgrade"`: Enables automatic upgrades to new patch versions for brokers + as new versions are released and supported by Amazon MQ. Automatic upgrades occur during + the scheduled maintenance window or after a manual broker reboot. Must be set to true for + ActiveMQ brokers version 5.18 and above and for RabbitMQ brokers version 3.13 and above. - `"configuration"`: A list of information about the configuration. 
- `"dataReplicationMode"`: Defines whether this broker is a part of a data replication pair. -- `"engineVersion"`: The broker engine version. For a list of supported engine versions, - see Supported engines. +- `"engineVersion"`: The broker engine version. For more information, see the ActiveMQ + version management and the RabbitMQ version management sections in the Amazon MQ Developer + Guide. When upgrading to ActiveMQ version 5.18 and above or RabbitMQ version 3.13 and + above, you must have autoMinorVersionUpgrade set to true for the broker. - `"hostInstanceType"`: The broker's host instance type to upgrade to. For a list of supported instance types, see Broker instance types. - `"ldapServerMetadata"`: Optional. The metadata of the LDAP server used to authenticate diff --git a/src/services/opensearch.jl b/src/services/opensearch.jl index c45c383de..282f8ec46 100644 --- a/src/services/opensearch.jl +++ b/src/services/opensearch.jl @@ -297,6 +297,7 @@ managing Amazon OpenSearch Service domains. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AIMLOptions"`: Options for all machine learning features for the specified domain. - `"AccessPolicies"`: Identity and Access Management (IAM) policy document specifying the access policies for the new domain. - `"AdvancedOptions"`: Key-value pairs to specify advanced configuration options. The @@ -2355,6 +2356,7 @@ Modifies the cluster configuration of the specified Amazon OpenSearch Service do # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AIMLOptions"`: Options for all machine learning features for the specified domain. - `"AccessPolicies"`: Identity and Access Management (IAM) access policy as a JSON-formatted string. - `"AdvancedOptions"`: Key-value pairs to specify advanced configuration options. 
The diff --git a/src/services/organizations.jl b/src/services/organizations.jl index ffb0df1a7..83614b04b 100644 --- a/src/services/organizations.jl +++ b/src/services/organizations.jl @@ -233,24 +233,23 @@ from the organization's management account. For more information about creating see Creating a member account in your organization in the Organizations User Guide. When you create an account in an organization using the Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such -as a payment method and signing the end user license agreement (EULA) is not automatically -collected. If you must remove an account from your organization later, you can do so only -after you provide the missing information. For more information, see Considerations before -removing an account from an organization in the Organizations User Guide. If you get an -exception that indicates that you exceeded your account limits for the organization, -contact Amazon Web Services Support. If you get an exception that indicates that the -operation failed because your organization is still initializing, wait one hour and then -try again. If the error persists, contact Amazon Web Services Support. Using -CreateAccount to create multiple temporary accounts isn't recommended. You can only close -an account from the Billing and Cost Management console, and you must be signed in as the -root user. For information on the requirements and process for closing an account, see -Closing a member account in your organization in the Organizations User Guide. When you -create a member account with this operation, you can choose whether to create the account -with the IAM User and Role Access to Billing Information switch enabled. If you enable it, -IAM users and roles that have appropriate permissions can view billing information for the -account. If you disable it, only the account root user can access billing information. 
For -information about how to disable this switch for an account, see Granting access to your -billing information and tools. +as a payment method is not automatically collected. If you must remove an account from your +organization later, you can do so only after you provide the missing information. For more +information, see Considerations before removing an account from an organization in the +Organizations User Guide. If you get an exception that indicates that you exceeded your +account limits for the organization, contact Amazon Web Services Support. If you get an +exception that indicates that the operation failed because your organization is still +initializing, wait one hour and then try again. If the error persists, contact Amazon Web +Services Support. Using CreateAccount to create multiple temporary accounts isn't +recommended. You can only close an account from the Billing and Cost Management console, +and you must be signed in as the root user. For information on the requirements and process +for closing an account, see Closing a member account in your organization in the +Organizations User Guide. When you create a member account with this operation, you can +choose whether to create the account with the IAM User and Role Access to Billing +Information switch enabled. If you enable it, IAM users and roles that have appropriate +permissions can view billing information for the account. If you disable it, only the +account root user can access billing information. For information about how to disable this +switch for an account, see Granting access to your billing information and tools. # Arguments - `account_name`: The friendly name of the member account. 
diff --git a/src/services/payment_cryptography_data.jl b/src/services/payment_cryptography_data.jl index cd9714f20..08697fd46 100644 --- a/src/services/payment_cryptography_data.jl +++ b/src/services/payment_cryptography_data.jl @@ -32,8 +32,14 @@ operations: EncryptData GetPublicCertificate ImportKey - `cipher_text`: The ciphertext to decrypt. - `decryption_attributes`: The encryption key type and attributes for ciphertext decryption. - `key_identifier`: The keyARN of the encryption key that Amazon Web Services Payment - Cryptography uses for ciphertext decryption. + Cryptography uses for ciphertext decryption. When a WrappedKeyBlock is provided, this value + will be the identifier to the key wrapping key. Otherwise, it is the key identifier used to + perform the operation. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"WrappedKey"`: The WrappedKeyBlock containing the encryption key for ciphertext + decryption. """ function decrypt_data( CipherText, @@ -106,12 +112,18 @@ ImportKey ReEncryptData # Arguments - `encryption_attributes`: The encryption key type and attributes for plaintext encryption. - `key_identifier`: The keyARN of the encryption key that Amazon Web Services Payment - Cryptography uses for plaintext encryption. + Cryptography uses for plaintext encryption. When a WrappedKeyBlock is provided, this value + will be the identifier to the key wrapping key. Otherwise, it is the key identifier used to + perform the operation. - `plain_text`: The plaintext to be encrypted. For encryption using asymmetric keys, plaintext data length is constrained by encryption key strength that you define in KeyAlgorithm and padding type that you define in AsymmetricEncryptionAttributes. For more information, see Encrypt data in the Amazon Web Services Payment Cryptography User Guide. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"WrappedKey"`: The WrappedKeyBlock containing the encryption key for plaintext + encryption. """ function encrypt_data( EncryptionAttributes, @@ -396,32 +408,37 @@ end re_encrypt_data(cipher_text, incoming_encryption_attributes, incoming_key_identifier, outgoing_encryption_attributes, outgoing_key_identifier) re_encrypt_data(cipher_text, incoming_encryption_attributes, incoming_key_identifier, outgoing_encryption_attributes, outgoing_key_identifier, params::Dict{String,<:Any}) -Re-encrypt ciphertext using DUKPT, Symmetric and Asymmetric Data Encryption Keys. You can -either generate an encryption key within Amazon Web Services Payment Cryptography by -calling CreateKey or import your own encryption key by calling ImportKey. The KeyArn for -use with this operation must be in a compatible key state with KeyModesOfUse set to -Encrypt. In asymmetric encryption, ciphertext is encrypted using public component (imported -by calling ImportKey) of the asymmetric key pair created outside of Amazon Web Services -Payment Cryptography. For symmetric and DUKPT encryption, Amazon Web Services Payment -Cryptography supports TDES and AES algorithms. For asymmetric encryption, Amazon Web -Services Payment Cryptography supports RSA. To encrypt using DUKPT, a DUKPT key must -already exist within your account with KeyModesOfUse set to DeriveKey or a new DUKPT can be -generated by calling CreateKey. For information about valid keys for this operation, see -Understanding key attributes and Key types for specific data operations in the Amazon Web -Services Payment Cryptography User Guide. Cross-account use: This operation can't be used -across different Amazon Web Services accounts. Related operations: DecryptData -EncryptData GetPublicCertificate ImportKey +Re-encrypt ciphertext using DUKPT or Symmetric data encryption keys. 
You can either +generate an encryption key within Amazon Web Services Payment Cryptography by calling +CreateKey or import your own encryption key by calling ImportKey. The KeyArn for use with +this operation must be in a compatible key state with KeyModesOfUse set to Encrypt. For +symmetric and DUKPT encryption, Amazon Web Services Payment Cryptography supports TDES and +AES algorithms. To encrypt using DUKPT, a DUKPT key must already exist within your account +with KeyModesOfUse set to DeriveKey or a new DUKPT can be generated by calling CreateKey. +For information about valid keys for this operation, see Understanding key attributes and +Key types for specific data operations in the Amazon Web Services Payment Cryptography User +Guide. Cross-account use: This operation can't be used across different Amazon Web +Services accounts. Related operations: DecryptData EncryptData +GetPublicCertificate ImportKey # Arguments - `cipher_text`: Ciphertext to be encrypted. The minimum allowed length is 16 bytes and maximum allowed length is 4096 bytes. - `incoming_encryption_attributes`: The attributes and values for incoming ciphertext. - `incoming_key_identifier`: The keyARN of the encryption key of incoming ciphertext data. + When a WrappedKeyBlock is provided, this value will be the identifier to the key wrapping + key. Otherwise, it is the key identifier used to perform the operation. - `outgoing_encryption_attributes`: The attributes and values for outgoing ciphertext data after encryption by Amazon Web Services Payment Cryptography. - `outgoing_key_identifier`: The keyARN of the encryption key of outgoing ciphertext data after encryption by Amazon Web Services Payment Cryptography. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"IncomingWrappedKey"`: The WrappedKeyBlock containing the encryption key of incoming + ciphertext data. 
+- `"OutgoingWrappedKey"`: The WrappedKeyBlock containing the encryption key of outgoing + ciphertext data after encryption by Amazon Web Services Payment Cryptography. """ function re_encrypt_data( CipherText, @@ -500,7 +517,9 @@ operations: GeneratePinData VerifyPinData - `encrypted_pin_block`: The encrypted PIN block data that Amazon Web Services Payment Cryptography translates. - `incoming_key_identifier`: The keyARN of the encryption key under which incoming PIN - block data is encrypted. This key type can be PEK or BDK. + block data is encrypted. This key type can be PEK or BDK. When a WrappedKeyBlock is + provided, this value will be the identifier to the key wrapping key for PIN block. + Otherwise, it is the key identifier used to perform the operation. - `incoming_translation_attributes`: The format of the incoming PIN block data for translation within Amazon Web Services Payment Cryptography. - `outgoing_key_identifier`: The keyARN of the encryption key for encrypting outgoing PIN @@ -512,8 +531,12 @@ operations: GeneratePinData VerifyPinData Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"IncomingDukptAttributes"`: The attributes and values to use for incoming DUKPT encryption key for PIN block translation. +- `"IncomingWrappedKey"`: The WrappedKeyBlock containing the encryption key under which + incoming PIN block data is encrypted. - `"OutgoingDukptAttributes"`: The attributes and values to use for outgoing DUKPT encryption key after PIN block translation. +- `"OutgoingWrappedKey"`: The WrappedKeyBlock containing the encryption key for encrypting + outgoing PIN block data. """ function translate_pin_data( EncryptedPinBlock, diff --git a/src/services/pi.jl b/src/services/pi.jl index 8d1ac9c6e..594131469 100644 --- a/src/services/pi.jl +++ b/src/services/pi.jl @@ -173,7 +173,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys \"AdditionalMetrics\" : { \"string\" : \"string\" }. 
- `"Filter"`: One or more filters to apply in the request. Restrictions: Any number of filters by the same dimension, as specified in the GroupBy or Partition parameters. A - single filter for any other dimension in this dimension group. + single filter for any other dimension in this dimension group. The db.sql.db_id filter + isn't available for RDS for SQL Server DB instances. - `"MaxResults"`: The maximum number of items to return in the response. If more items exist than the specified MaxRecords value, a pagination token is included in the response so that the remaining results can be retrieved. diff --git a/src/services/qapps.jl b/src/services/qapps.jl new file mode 100644 index 000000000..978be8f08 --- /dev/null +++ b/src/services/qapps.jl @@ -0,0 +1,1189 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: qapps +using AWS.Compat +using AWS.UUIDs + +""" + associate_library_item_review(instance-id, library_item_id) + associate_library_item_review(instance-id, library_item_id, params::Dict{String,<:Any}) + +Associates a rating or review for a library item with the user submitting the request. This +increments the rating count for the specified library item. + +# Arguments +- `instance-id`: The unique identifier for the Amazon Q Business application environment + instance. +- `library_item_id`: The unique identifier of the library item to associate the review with. 
+ +""" +function associate_library_item_review( + instance_id, libraryItemId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/catalog.associateItemRating", + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function associate_library_item_review( + instance_id, + libraryItemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.associateItemRating", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + associate_qapp_with_user(app_id, instance-id) + associate_qapp_with_user(app_id, instance-id, params::Dict{String,<:Any}) + +This operation creates a link between the user's identity calling the operation and a +specific Q App. This is useful to mark the Q App as a favorite for the user if the user +doesn't own the Amazon Q App so they can still run it and see it in their inventory of Q +Apps. + +# Arguments +- `app_id`: The ID of the Amazon Q App to associate with the user. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. 
+ +""" +function associate_qapp_with_user( + appId, instance_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/apps.install", + Dict{String,Any}( + "appId" => appId, "headers" => Dict{String,Any}("instance-id" => instance_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function associate_qapp_with_user( + appId, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.install", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_library_item(app_id, app_version, categories, instance-id) + create_library_item(app_id, app_version, categories, instance-id, params::Dict{String,<:Any}) + +Creates a new library item for an Amazon Q App, allowing it to be discovered and used by +other allowed users. + +# Arguments +- `app_id`: The unique identifier of the Amazon Q App to publish to the library. +- `app_version`: The version of the Amazon Q App to publish to the library. +- `categories`: The categories to associate with the library item for easier discovery. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. 
+ +""" +function create_library_item( + appId, + appVersion, + categories, + instance_id; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.createItem", + Dict{String,Any}( + "appId" => appId, + "appVersion" => appVersion, + "categories" => categories, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_library_item( + appId, + appVersion, + categories, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.createItem", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "appVersion" => appVersion, + "categories" => categories, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_qapp(app_definition, instance-id, title) + create_qapp(app_definition, instance-id, title, params::Dict{String,<:Any}) + +Creates a new Amazon Q App based on the provided definition. The Q App definition specifies +the cards and flow of the Q App. This operation also calculates the dependencies between +the cards by inspecting the references in the prompts. + +# Arguments +- `app_definition`: The definition of the new Q App, specifying the cards and flow. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `title`: The title of the new Q App. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the new Q App. +- `"tags"`: Optional tags to associate with the new Q App. 
+""" +function create_qapp( + appDefinition, instance_id, title; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/apps.create", + Dict{String,Any}( + "appDefinition" => appDefinition, + "title" => title, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_qapp( + appDefinition, + instance_id, + title, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.create", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appDefinition" => appDefinition, + "title" => title, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_library_item(instance-id, library_item_id) + delete_library_item(instance-id, library_item_id, params::Dict{String,<:Any}) + +Deletes a library item for an Amazon Q App, removing it from the library so it can no +longer be discovered or used by other users. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `library_item_id`: The unique identifier of the library item to delete. 
+ +""" +function delete_library_item( + instance_id, libraryItemId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/catalog.deleteItem", + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_library_item( + instance_id, + libraryItemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.deleteItem", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_qapp(app_id, instance-id) + delete_qapp(app_id, instance-id, params::Dict{String,<:Any}) + +Deletes an Amazon Q App owned by the user. If the Q App was previously published to the +library, it is also removed from the library. + +# Arguments +- `app_id`: The unique identifier of the Q App to delete. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. 
+ +""" +function delete_qapp(appId, instance_id; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "POST", + "/apps.delete", + Dict{String,Any}( + "appId" => appId, "headers" => Dict{String,Any}("instance-id" => instance_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_qapp( + appId, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.delete", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + disassociate_library_item_review(instance-id, library_item_id) + disassociate_library_item_review(instance-id, library_item_id, params::Dict{String,<:Any}) + +Removes a rating or review previously submitted by the user for a library item. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `library_item_id`: The unique identifier of the library item to remove the review from. 
+ +""" +function disassociate_library_item_review( + instance_id, libraryItemId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/catalog.disassociateItemRating", + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_library_item_review( + instance_id, + libraryItemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.disassociateItemRating", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + disassociate_qapp_from_user(app_id, instance-id) + disassociate_qapp_from_user(app_id, instance-id, params::Dict{String,<:Any}) + +Disassociates a Q App from a user removing the user's access to run the Q App. + +# Arguments +- `app_id`: The unique identifier of the Q App to disassociate from the user. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. 
+ +""" +function disassociate_qapp_from_user( + appId, instance_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/apps.uninstall", + Dict{String,Any}( + "appId" => appId, "headers" => Dict{String,Any}("instance-id" => instance_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_qapp_from_user( + appId, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.uninstall", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_library_item(instance-id, library_item_id) + get_library_item(instance-id, library_item_id, params::Dict{String,<:Any}) + +Retrieves details about a library item for an Amazon Q App, including its metadata, +categories, ratings, and usage statistics. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `library_item_id`: The unique identifier of the library item to retrieve. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"appId"`: The unique identifier of the Amazon Q App associated with the library item. 
+""" +function get_library_item( + instance_id, libraryItemId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "GET", + "/catalog.getItem", + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_library_item( + instance_id, + libraryItemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "GET", + "/catalog.getItem", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_qapp(app_id, instance-id) + get_qapp(app_id, instance-id, params::Dict{String,<:Any}) + +Retrieves the full details of a Q App, including its definition specifying the cards and +flow. + +# Arguments +- `app_id`: The unique identifier of the Q App to retrieve. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance.
+ +""" +function get_qapp(appId, instance_id; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "GET", + "/apps.get", + Dict{String,Any}( + "appId" => appId, "headers" => Dict{String,Any}("instance-id" => instance_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_qapp( + appId, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "GET", + "/apps.get", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_qapp_session(instance-id, session_id) + get_qapp_session(instance-id, session_id, params::Dict{String,<:Any}) + +Retrieves the current state and results for an active session of an Amazon Q App. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `session_id`: The unique identifier of the Q App session to retrieve. 
+ +""" +function get_qapp_session( + instance_id, sessionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "GET", + "/runtime.getQAppSession", + Dict{String,Any}( + "sessionId" => sessionId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_qapp_session( + instance_id, + sessionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "GET", + "/runtime.getQAppSession", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "sessionId" => sessionId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + import_document(app_id, card_id, file_contents_base64, file_name, instance-id, scope) + import_document(app_id, card_id, file_contents_base64, file_name, instance-id, scope, params::Dict{String,<:Any}) + +Uploads a file that can then be used either as a default in a FileUploadCard from Q App +definition or as a file that is used inside a single Q App run. The purpose of the document +is determined by a scope parameter that indicates whether it is at the app definition level +or at the app session level. + +# Arguments +- `app_id`: The unique identifier of the Q App the file is associated with. +- `card_id`: The unique identifier of the card the file is associated with, if applicable. +- `file_contents_base64`: The base64-encoded contents of the file to upload. +- `file_name`: The name of the file being uploaded. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `scope`: Whether the file is associated with an Q App definition or a specific Q App + session. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"sessionId"`: The unique identifier of the Q App session the file is associated with, if + applicable. +""" +function import_document( + appId, + cardId, + fileContentsBase64, + fileName, + instance_id, + scope; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.importDocument", + Dict{String,Any}( + "appId" => appId, + "cardId" => cardId, + "fileContentsBase64" => fileContentsBase64, + "fileName" => fileName, + "scope" => scope, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function import_document( + appId, + cardId, + fileContentsBase64, + fileName, + instance_id, + scope, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.importDocument", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "cardId" => cardId, + "fileContentsBase64" => fileContentsBase64, + "fileName" => fileName, + "scope" => scope, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_library_items(instance-id) + list_library_items(instance-id, params::Dict{String,<:Any}) + +Lists the library items for Amazon Q Apps that are published and available for users in +your Amazon Web Services account. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"categoryId"`: Optional category to filter the library items by. +- `"limit"`: The maximum number of library items to return in the response. +- `"nextToken"`: The token to request the next page of results. 
+""" +function list_library_items(instance_id; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "GET", + "/catalog.list", + Dict{String,Any}("headers" => Dict{String,Any}("instance-id" => instance_id)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_library_items( + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "GET", + "/catalog.list", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("instance-id" => instance_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_qapps(instance-id) + list_qapps(instance-id, params::Dict{String,<:Any}) + +Lists the Amazon Q Apps owned by or associated with the user either because they created it +or because they used it from the library in the past. The user identity is extracted from +the credentials used to invoke this operation. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"limit"`: The maximum number of Q Apps to return in the response. +- `"nextToken"`: The token to request the next page of results.
+""" +function list_qapps(instance_id; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "GET", + "/apps.list", + Dict{String,Any}("headers" => Dict{String,Any}("instance-id" => instance_id)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_qapps( + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "GET", + "/apps.list", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("instance-id" => instance_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Lists the tags associated with an Amazon Q Apps resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource whose tags should be + listed. + +""" +function list_tags_for_resource( + resourceARN; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "GET", + "/tags/$(resourceARN)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceARN, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "GET", + "/tags/$(resourceARN)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + predict_qapp(instance-id) + predict_qapp(instance-id, params::Dict{String,<:Any}) + +Generates an Amazon Q App definition based on either a conversation or a problem statement +provided as input. The resulting app definition can be used to call CreateQApp. This API +doesn't create Amazon Q Apps directly. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`.
Valid keys are: +- `"options"`: The input to generate the Q App definition from, either a conversation or + problem statement. +""" +function predict_qapp(instance_id; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "POST", + "/apps.predictQApp", + Dict{String,Any}("headers" => Dict{String,Any}("instance-id" => instance_id)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function predict_qapp( + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.predictQApp", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("instance-id" => instance_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_qapp_session(app_id, app_version, instance-id) + start_qapp_session(app_id, app_version, instance-id, params::Dict{String,<:Any}) + +Starts a new session for an Amazon Q App, allowing inputs to be provided and the app to be +run. Each Q App session will be condensed into a single conversation in the web +experience. + +# Arguments +- `app_id`: The unique identifier of the Q App to start a session for. +- `app_version`: The version of the Q App to use for the session. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"initialValues"`: Optional initial input values to provide for the Q App session. +- `"tags"`: Optional tags to associate with the new Q App session. 
+""" +function start_qapp_session( + appId, appVersion, instance_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/runtime.startQAppSession", + Dict{String,Any}( + "appId" => appId, + "appVersion" => appVersion, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_qapp_session( + appId, + appVersion, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/runtime.startQAppSession", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "appVersion" => appVersion, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + stop_qapp_session(instance-id, session_id) + stop_qapp_session(instance-id, session_id, params::Dict{String,<:Any}) + +Stops an active session for an Amazon Q App. This deletes all data related to the session +and makes it invalid for future uses. The results of the session will be persisted as part +of the conversation. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `session_id`: The unique identifier of the Q App session to stop.
+ +""" +function stop_qapp_session( + instance_id, sessionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/runtime.deleteMiniAppRun", + Dict{String,Any}( + "sessionId" => sessionId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_qapp_session( + instance_id, + sessionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/runtime.deleteMiniAppRun", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "sessionId" => sessionId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Associates tags with an Amazon Q Apps resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource to tag. +- `tags`: The tags to associate with the resource. + +""" +function tag_resource(resourceARN, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "POST", + "/tags/$(resourceARN)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceARN, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/tags/$(resourceARN)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Disassociates tags from an Amazon Q Apps resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource to disassociate the tag + from. 
+- `tag_keys`: The keys of the tags to disassociate from the resource. + +""" +function untag_resource( + resourceARN, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "DELETE", + "/tags/$(resourceARN)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceARN, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "DELETE", + "/tags/$(resourceARN)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_library_item(instance-id, library_item_id) + update_library_item(instance-id, library_item_id, params::Dict{String,<:Any}) + +Updates the metadata and status of a library item for an Amazon Q App. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `library_item_id`: The unique identifier of the library item to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"categories"`: The new categories to associate with the library item. +- `"status"`: The new status to set for the library item, such as \"Published\" or + \"Hidden\". 
+""" +function update_library_item( + instance_id, libraryItemId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/catalog.updateItem", + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_library_item( + instance_id, + libraryItemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.updateItem", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_qapp(app_id, instance-id) + update_qapp(app_id, instance-id, params::Dict{String,<:Any}) + +Updates an existing Amazon Q App, allowing modifications to its title, description, and +definition. + +# Arguments +- `app_id`: The unique identifier of the Q App to update. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"appDefinition"`: The new definition specifying the cards and flow for the Q App. +- `"description"`: The new description for the Q App. +- `"title"`: The new title for the Q App. 
+""" +function update_qapp(appId, instance_id; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "POST", + "/apps.update", + Dict{String,Any}( + "appId" => appId, "headers" => Dict{String,Any}("instance-id" => instance_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_qapp( + appId, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.update", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_qapp_session(instance-id, session_id) + update_qapp_session(instance-id, session_id, params::Dict{String,<:Any}) + +Updates the session for a given Q App sessionId. This is only valid when at least one card +of the session is in the WAITING state. Data for each WAITING card can be provided as +input. If inputs are not provided, the call will be accepted but session will not move +forward. Inputs for cards that are not in the WAITING status will be ignored. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `session_id`: The unique identifier of the Q App session to provide input for. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"values"`: The input values to provide for the current state of the Q App session. 
+""" +function update_qapp_session( + instance_id, sessionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/runtime.updateQAppSession", + Dict{String,Any}( + "sessionId" => sessionId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_qapp_session( + instance_id, + sessionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/runtime.updateQAppSession", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "sessionId" => sessionId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/qbusiness.jl b/src/services/qbusiness.jl index 9fb58ab1a..4ef143385 100644 --- a/src/services/qbusiness.jl +++ b/src/services/qbusiness.jl @@ -191,6 +191,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"identityCenterInstanceArn"`: The Amazon Resource Name (ARN) of the IAM Identity Center instance you are either creating for—or connecting to—your Amazon Q Business application. +- `"personalizationConfiguration"`: Configuration information about chat response + personalization. For more information, see Personalizing chat responses - `"qAppsConfiguration"`: An option to allow end users to create and use Amazon Q Apps in the web experience. - `"roleArn"`: The Amazon Resource Name (ARN) of an IAM role with permissions to access @@ -2140,6 +2142,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"identityCenterInstanceArn"`: The Amazon Resource Name (ARN) of the IAM Identity Center instance you are either creating for—or connecting to—your Amazon Q Business application. +- `"personalizationConfiguration"`: Configuration information about chat response + personalization. 
For more information, see Personalizing chat responses. - `"qAppsConfiguration"`: An option to allow end users to create and use Amazon Q Apps in the web experience. - `"roleArn"`: An Amazon Web Services Identity and Access Management (IAM) role that gives diff --git a/src/services/qconnect.jl b/src/services/qconnect.jl index 29c813e3b..1680362d5 100644 --- a/src/services/qconnect.jl +++ b/src/services/qconnect.jl @@ -195,6 +195,80 @@ function create_content( ) end +""" + create_content_association(association, association_type, content_id, knowledge_base_id) + create_content_association(association, association_type, content_id, knowledge_base_id, params::Dict{String,<:Any}) + +Creates an association between a content resource in a knowledge base and step-by-step +guides. Step-by-step guides offer instructions to agents for resolving common customer +issues. You create a content association to integrate Amazon Q in Connect and step-by-step +guides. After you integrate Amazon Q and step-by-step guides, when Amazon Q provides a +recommendation to an agent based on the intent that it's detected, it also provides them +with the option to start the step-by-step guide that you have associated with the content. +Note the following limitations: You can create only one content association for each +content resource in a knowledge base. You can associate a step-by-step guide with +multiple content resources. For more information, see Integrate Amazon Q in Connect with +step-by-step guides in the Amazon Connect Administrator Guide. + +# Arguments +- `association`: The identifier of the associated resource. +- `association_type`: The type of association. +- `content_id`: The identifier of the content. +- `knowledge_base_id`: The identifier of the knowledge base. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. +- `"tags"`: The tags used to organize, track, or control access for this resource. +""" +function create_content_association( + association, + associationType, + contentId, + knowledgeBaseId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations", + Dict{String,Any}( + "association" => association, + "associationType" => associationType, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_content_association( + association, + associationType, + contentId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "association" => association, + "associationType" => associationType, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_knowledge_base(knowledge_base_type, name) create_knowledge_base(knowledge_base_type, name, params::Dict{String,<:Any}) @@ -502,6 +576,50 @@ function delete_content( ) end +""" + delete_content_association(content_association_id, content_id, knowledge_base_id) + delete_content_association(content_association_id, content_id, knowledge_base_id, params::Dict{String,<:Any}) + +Deletes the content association. 
For more information about content associations--what +they are and when they are used--see Integrate Amazon Q in Connect with step-by-step guides +in the Amazon Connect Administrator Guide. + +# Arguments +- `content_association_id`: The identifier of the content association. Can be either the ID + or the ARN. URLs cannot contain the ARN. +- `content_id`: The identifier of the content. +- `knowledge_base_id`: The identifier of the knowledge base. + +""" +function delete_content_association( + contentAssociationId, + contentId, + knowledgeBaseId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "DELETE", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations/$(contentAssociationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_content_association( + contentAssociationId, + contentId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "DELETE", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations/$(contentAssociationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_import_job(import_job_id, knowledge_base_id) delete_import_job(import_job_id, knowledge_base_id, params::Dict{String,<:Any}) @@ -725,6 +843,50 @@ function get_content( ) end +""" + get_content_association(content_association_id, content_id, knowledge_base_id) + get_content_association(content_association_id, content_id, knowledge_base_id, params::Dict{String,<:Any}) + +Returns the content association. For more information about content associations--what they +are and when they are used--see Integrate Amazon Q in Connect with step-by-step guides in +the Amazon Connect Administrator Guide. + +# Arguments +- `content_association_id`: The identifier of the content association. Can be either the ID + or the ARN. URLs cannot contain the ARN. 
+- `content_id`: The identifier of the content. +- `knowledge_base_id`: The identifier of the knowledge base. + +""" +function get_content_association( + contentAssociationId, + contentId, + knowledgeBaseId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations/$(contentAssociationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_content_association( + contentAssociationId, + contentId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations/$(contentAssociationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_content_summary(content_id, knowledge_base_id) get_content_summary(content_id, knowledge_base_id, params::Dict{String,<:Any}) @@ -1026,6 +1188,49 @@ function list_assistants( ) end +""" + list_content_associations(content_id, knowledge_base_id) + list_content_associations(content_id, knowledge_base_id, params::Dict{String,<:Any}) + +Lists the content associations. For more information about content associations--what they +are and when they are used--see Integrate Amazon Q in Connect with step-by-step guides in +the Amazon Connect Administrator Guide. + +# Arguments +- `content_id`: The identifier of the content. +- `knowledge_base_id`: The identifier of the knowledge base. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. 
+""" +function list_content_associations( + contentId, knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_content_associations( + contentId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_contents(knowledge_base_id) list_contents(knowledge_base_id, params::Dict{String,<:Any}) diff --git a/src/services/rds.jl b/src/services/rds.jl index aaddc12b2..561848b67 100644 --- a/src/services/rds.jl +++ b/src/services/rds.jl @@ -1146,7 +1146,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DBSubnetGroupName"`: A DB subnet group to associate with this DB cluster. This setting is required to create a Multi-AZ DB cluster. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must match the name of an existing DB subnet group. - Must not be default. Example: mydbsubnetgroup + Example: mydbsubnetgroup - `"DBSystemId"`: Reserved for future use. - `"DatabaseName"`: The name for your database of up to 64 alphanumeric characters. A database named postgres is always created. If this parameter is specified, an additional @@ -11051,11 +11051,10 @@ end Starts an export of DB snapshot or DB cluster data to Amazon S3. The provided IAM role must have access to the S3 bucket. You can't export snapshot data from Db2 or RDS Custom DB -instances. You can't export cluster data from Multi-AZ DB clusters. 
For more information on -exporting DB snapshot data, see Exporting DB snapshot data to Amazon S3 in the Amazon RDS -User Guide or Exporting DB cluster snapshot data to Amazon S3 in the Amazon Aurora User -Guide. For more information on exporting DB cluster data, see Exporting DB cluster data to -Amazon S3 in the Amazon Aurora User Guide. +instances. For more information on exporting DB snapshot data, see Exporting DB snapshot +data to Amazon S3 in the Amazon RDS User Guide or Exporting DB cluster snapshot data to +Amazon S3 in the Amazon Aurora User Guide. For more information on exporting DB cluster +data, see Exporting DB cluster data to Amazon S3 in the Amazon Aurora User Guide. # Arguments - `export_task_identifier`: A unique identifier for the export task. This ID isn't an diff --git a/src/services/rekognition.jl b/src/services/rekognition.jl index ea1220c2f..cc561675d 100644 --- a/src/services/rekognition.jl +++ b/src/services/rekognition.jl @@ -349,6 +349,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys existing dataset or specify the Amazon S3 bucket location of an Amazon Sagemaker format manifest file. If you don't specify datasetSource, an empty dataset is created. To add labeled images to the dataset, You can use the console or call UpdateDatasetEntries. +- `"Tags"`: A set of tags (key-value pairs) that you want to attach to the dataset. """ function create_dataset( DatasetType, ProjectArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -441,6 +442,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys for Content Moderation. Applicable only to adapters. - `"Feature"`: Specifies feature that is being customized. If no value is provided CUSTOM_LABELS is used as a default. +- `"Tags"`: A set of tags (key-value pairs) that you want to attach to the project. 
""" function create_project(ProjectName; aws_config::AbstractAWSConfig=global_aws_config()) return rekognition( @@ -2148,7 +2150,9 @@ in the sample seen below. Use MaxResults parameter to limit the number of label If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetlabelDetection and populate the NextToken request -parameter with the token value returned from the previous call to GetLabelDetection. +parameter with the token value returned from the previous call to GetLabelDetection. If you +are retrieving results while using the Amazon Simple Notification Service, note that you +will receive an \"ERROR\" notification if the job encounters an issue. # Arguments - `job_id`: Job identifier for the label detection operation for which you want results diff --git a/src/services/s3.jl b/src/services/s3.jl index d9db5a265..41a5ef3d1 100644 --- a/src/services/s3.jl +++ b/src/services/s3.jl @@ -301,30 +301,29 @@ Amazon Web Services Identity and Access Management (IAM) identity-based policies Express One Zone in the Amazon S3 User Guide. Response and special errors When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length. You always need to -read the entire response body to check if the copy succeeds. to keep the connection alive -while we copy the data. If the copy is successful, you receive a response with -information about the copied object. A copy request might return an error when Amazon S3 -receives the copy request or while Amazon S3 is copying the files. A 200 OK response can -contain either a success or an error. If the error occurs before the copy action starts, -you receive a standard Amazon S3 error. 
If the error occurs during the copy operation, -the error response is embedded in the 200 OK response. For example, in a cross-region copy, -you may encounter throttling and receive a 200 OK response. For more information, see -Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK status code -means the copy was accepted, but it doesn't mean the copy is complete. Another example is -when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the -copy and you may receive a 200 OK response. You must stay connected to Amazon S3 until the -entire response is successfully received and processed. If you call this API operation -directly, make sure to design your application to parse the content of the response and -handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. -The SDKs detect the embedded error and apply error handling per your configuration settings -(including automatically retrying the request as appropriate). If the condition persists, -the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an -error). Charge The copy request charge is based on the storage class and Region that -you specify for the destination object. The request can also result in a data retrieval -charge for the source if the source storage class bills for data retrieval. If the copy -source is in a different region, the data transfer is billed to the copy source account. -For pricing information, see Amazon S3 pricing. HTTP Host header syntax Directory -buckets - The HTTP Host header syntax is +read the entire response body to check if the copy succeeds. If the copy is successful, +you receive a response with information about the copied object. A copy request might +return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the +files. A 200 OK response can contain either a success or an error. 
If the error occurs +before the copy action starts, you receive a standard Amazon S3 error. If the error +occurs during the copy operation, the error response is embedded in the 200 OK response. +For example, in a cross-region copy, you may encounter throttling and receive a 200 OK +response. For more information, see Resolve the Error 200 response when copying objects to +Amazon S3. The 200 OK status code means the copy was accepted, but it doesn't mean the copy +is complete. Another example is when you disconnect from Amazon S3 before the copy is +complete, Amazon S3 might cancel the copy and you may receive a 200 OK response. You must +stay connected to Amazon S3 until the entire response is successfully received and +processed. If you call this API operation directly, make sure to design your application to +parse the content of the response and handle it appropriately. If you use Amazon Web +Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply +error handling per your configuration settings (including automatically retrying the +request as appropriate). If the condition persists, the SDKs throw an exception (or, for +the SDKs that don't use exceptions, they return an error). Charge The copy request +charge is based on the storage class and Region that you specify for the destination +object. The request can also result in a data retrieval charge for the source if the source +storage class bills for data retrieval. If the copy source is in a different region, the +data transfer is billed to the copy source account. For pricing information, see Amazon S3 +pricing. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. 
The following operations are related to CopyObject: PutObject GetObject @@ -2012,7 +2011,7 @@ Permissions General purpose bucket permissions - The following permissions a in your policies when your DeleteObjects request includes specific headers. s3:DeleteObject - To delete an object from a bucket, you must always specify the s3:DeleteObject permission. s3:DeleteObjectVersion - To delete a specific version of -an object from a versiong-enabled bucket, you must specify the s3:DeleteObjectVersion +an object from a versioning-enabled bucket, you must specify the s3:DeleteObjectVersion permission. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission @@ -4150,6 +4149,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"partNumber"`: Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified. Useful querying about the size of the part and the number of parts in this object. +- `"response-cache-control"`: Sets the Cache-Control header of the response. +- `"response-content-disposition"`: Sets the Content-Disposition header of the response. +- `"response-content-encoding"`: Sets the Content-Encoding header of the response. +- `"response-content-language"`: Sets the Content-Language header of the response. +- `"response-content-type"`: Sets the Content-Type header of the response. +- `"response-expires"`: Sets the Expires header of the response. - `"versionId"`: Version ID used to reference a specific version of the object. For directory buckets in this API operation, only the null value of the version ID is supported. @@ -7804,12 +7809,12 @@ bucket, you must have the s3:GetObject permission to read the source object th copied. 
If the destination bucket is a general purpose bucket, you must have the s3:PutObject permission to write the object copy to the destination bucket. For information about permissions required to use the multipart upload API, see Multipart -Upload and Permissions in the Amazon S3 User Guide. Directory bucket permissions - You -must have permissions in a bucket policy or an IAM identity-based policy based on the +upload API and permissions in the Amazon S3 User Guide. Directory bucket permissions - +You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an UploadPartCopy operation. If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession -permission in the Action element of a policy to read the object . By default, the session -is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the +permission in the Action element of a policy to read the object. By default, the session is +in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket. If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. 
The diff --git a/src/services/sagemaker.jl b/src/services/sagemaker.jl index bf951fe78..91bd67d20 100644 --- a/src/services/sagemaker.jl +++ b/src/services/sagemaker.jl @@ -3532,6 +3532,101 @@ function create_notebook_instance_lifecycle_config( ) end +""" + create_optimization_job(deployment_instance_type, model_source, optimization_configs, optimization_job_name, output_config, role_arn, stopping_condition) + create_optimization_job(deployment_instance_type, model_source, optimization_configs, optimization_job_name, output_config, role_arn, stopping_condition, params::Dict{String,<:Any}) + +Creates a job that optimizes a model for inference performance. To create the job, you +provide the location of a source model, and you provide the settings for the optimization +techniques that you want the job to apply. When the job completes successfully, SageMaker +uploads the new optimized model to the output destination that you specify. For more +information about how to use this action, and about the supported optimization techniques, +see Optimize model inference with Amazon SageMaker. + +# Arguments +- `deployment_instance_type`: The type of instance that hosts the optimized model that you + create with the optimization job. +- `model_source`: The location of the source model to optimize with an optimization job. +- `optimization_configs`: Settings for each of the optimization techniques that the job + applies. +- `optimization_job_name`: A custom name for the new optimization job. +- `output_config`: Details for where to store the optimized model that you create with the + optimization job. +- `role_arn`: The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker + to perform tasks on your behalf. 
During model optimization, Amazon SageMaker needs your + permission to: Read input data from an S3 bucket Write model artifacts to an S3 bucket + Write logs to Amazon CloudWatch Logs Publish metrics to Amazon CloudWatch You grant + permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker, + the caller of this API must have the iam:PassRole permission. For more information, see + Amazon SageMaker Roles. +- `stopping_condition`: + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"OptimizationEnvironment"`: The environment variables to set in the model container. +- `"Tags"`: A list of key-value pairs associated with the optimization job. For more + information, see Tagging Amazon Web Services resources in the Amazon Web Services General + Reference Guide. +- `"VpcConfig"`: A VPC in Amazon VPC that your optimized model has access to. +""" +function create_optimization_job( + DeploymentInstanceType, + ModelSource, + OptimizationConfigs, + OptimizationJobName, + OutputConfig, + RoleArn, + StoppingCondition; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "CreateOptimizationJob", + Dict{String,Any}( + "DeploymentInstanceType" => DeploymentInstanceType, + "ModelSource" => ModelSource, + "OptimizationConfigs" => OptimizationConfigs, + "OptimizationJobName" => OptimizationJobName, + "OutputConfig" => OutputConfig, + "RoleArn" => RoleArn, + "StoppingCondition" => StoppingCondition, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_optimization_job( + DeploymentInstanceType, + ModelSource, + OptimizationConfigs, + OptimizationJobName, + OutputConfig, + RoleArn, + StoppingCondition, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "CreateOptimizationJob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "DeploymentInstanceType" => 
DeploymentInstanceType, + "ModelSource" => ModelSource, + "OptimizationConfigs" => OptimizationConfigs, + "OptimizationJobName" => OptimizationJobName, + "OutputConfig" => OutputConfig, + "RoleArn" => RoleArn, + "StoppingCondition" => StoppingCondition, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_pipeline(client_request_token, pipeline_name, role_arn) create_pipeline(client_request_token, pipeline_name, role_arn, params::Dict{String,<:Any}) @@ -5503,7 +5598,8 @@ Delete a hub content reference in order to remove a model from a private hub. # Arguments - `hub_content_name`: The name of the hub content to delete. -- `hub_content_type`: The type of hub content to delete. +- `hub_content_type`: The type of hub content reference to delete. The only supported type + of hub content reference to delete is ModelReference. - `hub_name`: The name of the hub to delete the hub content reference from. """ @@ -6240,6 +6336,45 @@ function delete_notebook_instance_lifecycle_config( ) end +""" + delete_optimization_job(optimization_job_name) + delete_optimization_job(optimization_job_name, params::Dict{String,<:Any}) + +Deletes an optimization job. + +# Arguments +- `optimization_job_name`: The name that you assigned to the optimization job. 
+ +""" +function delete_optimization_job( + OptimizationJobName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "DeleteOptimizationJob", + Dict{String,Any}("OptimizationJobName" => OptimizationJobName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_optimization_job( + OptimizationJobName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "DeleteOptimizationJob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("OptimizationJobName" => OptimizationJobName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_pipeline(client_request_token, pipeline_name) delete_pipeline(client_request_token, pipeline_name, params::Dict{String,<:Any}) @@ -8493,6 +8628,45 @@ function describe_notebook_instance_lifecycle_config( ) end +""" + describe_optimization_job(optimization_job_name) + describe_optimization_job(optimization_job_name, params::Dict{String,<:Any}) + +Provides the properties of the specified optimization job. + +# Arguments +- `optimization_job_name`: The name that you assigned to the optimization job. 
+ +""" +function describe_optimization_job( + OptimizationJobName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "DescribeOptimizationJob", + Dict{String,Any}("OptimizationJobName" => OptimizationJobName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_optimization_job( + OptimizationJobName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "DescribeOptimizationJob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("OptimizationJobName" => OptimizationJobName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_pipeline(pipeline_name) describe_pipeline(pipeline_name, params::Dict{String,<:Any}) @@ -11782,6 +11956,53 @@ function list_notebook_instances( ) end +""" + list_optimization_jobs() + list_optimization_jobs(params::Dict{String,<:Any}) + +Lists the optimization jobs in your account and their properties. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CreationTimeAfter"`: Filters the results to only those optimization jobs that were + created after the specified time. +- `"CreationTimeBefore"`: Filters the results to only those optimization jobs that were + created before the specified time. +- `"LastModifiedTimeAfter"`: Filters the results to only those optimization jobs that were + updated after the specified time. +- `"LastModifiedTimeBefore"`: Filters the results to only those optimization jobs that were + updated before the specified time. +- `"MaxResults"`: The maximum number of optimization jobs to return in the response. The + default is 50. +- `"NameContains"`: Filters the results to only those optimization jobs with a name that + contains the specified string. +- `"NextToken"`: A token that you use to get the next set of results following a truncated + response. 
If the response to the previous request was truncated, that response provides the + value for this token. +- `"OptimizationContains"`: Filters the results to only those optimization jobs that apply + the specified optimization techniques. You can specify either Quantization or Compilation. +- `"SortBy"`: The field by which to sort the optimization jobs in the response. The default + is CreationTime +- `"SortOrder"`: The sort order for results. The default is Ascending +- `"StatusEquals"`: Filters the results to only those optimization jobs with the specified + status. +""" +function list_optimization_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) + return sagemaker( + "ListOptimizationJobs"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_optimization_jobs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "ListOptimizationJobs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_pipeline_execution_steps() list_pipeline_execution_steps(params::Dict{String,<:Any}) @@ -13663,6 +13884,45 @@ function stop_notebook_instance( ) end +""" + stop_optimization_job(optimization_job_name) + stop_optimization_job(optimization_job_name, params::Dict{String,<:Any}) + +Ends a running inference optimization job. + +# Arguments +- `optimization_job_name`: The name that you assigned to the optimization job. 
+ +""" +function stop_optimization_job( + OptimizationJobName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "StopOptimizationJob", + Dict{String,Any}("OptimizationJobName" => OptimizationJobName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_optimization_job( + OptimizationJobName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "StopOptimizationJob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("OptimizationJobName" => OptimizationJobName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ stop_pipeline_execution(client_request_token, pipeline_execution_arn) stop_pipeline_execution(client_request_token, pipeline_execution_arn, params::Dict{String,<:Any}) diff --git a/src/services/workspaces.jl b/src/services/workspaces.jl index 2d5b341c5..82b4035df 100644 --- a/src/services/workspaces.jl +++ b/src/services/workspaces.jl @@ -756,6 +756,75 @@ function create_workspaces( ) end +""" + create_workspaces_pool(bundle_id, capacity, description, directory_id, pool_name) + create_workspaces_pool(bundle_id, capacity, description, directory_id, pool_name, params::Dict{String,<:Any}) + +Creates a pool of WorkSpaces. + +# Arguments +- `bundle_id`: The identifier of the bundle for the pool. +- `capacity`: The user capacity of the pool. +- `description`: The pool description. +- `directory_id`: The identifier of the directory for the pool. +- `pool_name`: The name of the pool. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ApplicationSettings"`: Indicates the application settings of the pool. +- `"Tags"`: The tags for the pool. +- `"TimeoutSettings"`: Indicates the timeout settings of the pool. 
+""" +function create_workspaces_pool( + BundleId, + Capacity, + Description, + DirectoryId, + PoolName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "CreateWorkspacesPool", + Dict{String,Any}( + "BundleId" => BundleId, + "Capacity" => Capacity, + "Description" => Description, + "DirectoryId" => DirectoryId, + "PoolName" => PoolName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_workspaces_pool( + BundleId, + Capacity, + Description, + DirectoryId, + PoolName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "CreateWorkspacesPool", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "BundleId" => BundleId, + "Capacity" => Capacity, + "Description" => Description, + "DirectoryId" => DirectoryId, + "PoolName" => PoolName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_account_link_invitation(link_id) delete_account_link_invitation(link_id, params::Dict{String,<:Any}) @@ -1698,6 +1767,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Limit"`: The maximum number of directories to return. - `"NextToken"`: If you received a NextToken from a previous call that was paginated, provide this token to receive the next set of results. +- `"WorkspaceDirectoryNames"`: The names of the WorkSpace directories. """ function describe_workspace_directories(; aws_config::AbstractAWSConfig=global_aws_config()) return workspaces( @@ -1890,6 +1960,73 @@ function describe_workspaces_connection_status( ) end +""" + describe_workspaces_pool_sessions(pool_id) + describe_workspaces_pool_sessions(pool_id, params::Dict{String,<:Any}) + +Retrieves a list that describes the streaming sessions for a specified pool. + +# Arguments +- `pool_id`: The identifier of the pool. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Limit"`: The maximum number of items to return. +- `"NextToken"`: If you received a NextToken from a previous call that was paginated, + provide this token to receive the next set of results. +- `"UserId"`: The identifier of the user. +""" +function describe_workspaces_pool_sessions( + PoolId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "DescribeWorkspacesPoolSessions", + Dict{String,Any}("PoolId" => PoolId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_workspaces_pool_sessions( + PoolId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "DescribeWorkspacesPoolSessions", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("PoolId" => PoolId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_workspaces_pools() + describe_workspaces_pools(params::Dict{String,<:Any}) + +Describes the specified WorkSpaces Pools. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: The filter conditions for the WorkSpaces Pool to return. +- `"Limit"`: The maximum number of items to return. +- `"NextToken"`: If you received a NextToken from a previous call that was paginated, + provide this token to receive the next set of results. +- `"PoolIds"`: The identifier of the WorkSpaces Pools. 
+""" +function describe_workspaces_pools(; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces( + "DescribeWorkspacesPools"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_workspaces_pools( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "DescribeWorkspacesPools", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ disassociate_connection_alias(alias_id) disassociate_connection_alias(alias_id, params::Dict{String,<:Any}) @@ -2501,6 +2638,44 @@ function modify_selfservice_permissions( ) end +""" + modify_streaming_properties(resource_id) + modify_streaming_properties(resource_id, params::Dict{String,<:Any}) + +Modifies the specified streaming properties. + +# Arguments +- `resource_id`: The identifier of the resource. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"StreamingProperties"`: The streaming properties to configure. 
+""" +function modify_streaming_properties( + ResourceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "ModifyStreamingProperties", + Dict{String,Any}("ResourceId" => ResourceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function modify_streaming_properties( + ResourceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "ModifyStreamingProperties", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceId" => ResourceId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ modify_workspace_access_properties(resource_id, workspace_access_properties) modify_workspace_access_properties(resource_id, workspace_access_properties, params::Dict{String,<:Any}) @@ -2775,29 +2950,28 @@ function rebuild_workspaces( end """ - register_workspace_directory(directory_id, enable_work_docs) - register_workspace_directory(directory_id, enable_work_docs, params::Dict{String,<:Any}) + register_workspace_directory() + register_workspace_directory(params::Dict{String,<:Any}) Registers the specified directory. This operation is asynchronous and returns before the WorkSpace directory is registered. If this is the first time you are registering a directory, you will need to create the workspaces_DefaultRole role before you can register a directory. For more information, see Creating the workspaces_DefaultRole Role. -# Arguments -- `directory_id`: The identifier of the directory. You cannot register a directory if it +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ActiveDirectoryConfig"`: The active directory config of the directory. +- `"DirectoryId"`: The identifier of the directory. You cannot register a directory if it does not have a status of Active. 
If the directory does not have a status of Active, you will receive an InvalidResourceStateException error. If you have already registered the maximum number of directories that you can register with Amazon WorkSpaces, you will receive a ResourceLimitExceededException error. Deregister directories that you are not using for WorkSpaces, and try again. -- `enable_work_docs`: Indicates whether Amazon WorkDocs is enabled or disabled. If you have - enabled this parameter and WorkDocs is not available in the Region, you will receive an - OperationNotSupportedException error. Set EnableWorkDocs to disabled, and try again. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"EnableSelfService"`: Indicates whether self-service capabilities are enabled or disabled. +- `"EnableWorkDocs"`: Indicates whether Amazon WorkDocs is enabled or disabled. If you have + enabled this parameter and WorkDocs is not available in the Region, you will receive an + OperationNotSupportedException error. Set EnableWorkDocs to disabled, and try again. - `"SubnetIds"`: The identifiers of the subnets for your virtual private cloud (VPC). Make sure that the subnets are in supported Availability Zones. The subnets must also be in separate Availability Zones. If these conditions are not met, you will receive an @@ -2808,34 +2982,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Web Services account must be enabled for BYOL. If your account has not been enabled for BYOL, you will receive an InvalidParameterValuesException error. For more information about BYOL images, see Bring Your Own Windows Desktop Images. +- `"UserIdentityType"`: The type of identity management the user is using. +- `"WorkspaceDirectoryDescription"`: Description of the directory to register. +- `"WorkspaceDirectoryName"`: The name of the directory to register. 
+- `"WorkspaceType"`: Indicates whether the directory's WorkSpace type is personal or pools. """ -function register_workspace_directory( - DirectoryId, EnableWorkDocs; aws_config::AbstractAWSConfig=global_aws_config() -) +function register_workspace_directory(; aws_config::AbstractAWSConfig=global_aws_config()) return workspaces( - "RegisterWorkspaceDirectory", - Dict{String,Any}("DirectoryId" => DirectoryId, "EnableWorkDocs" => EnableWorkDocs); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, + "RegisterWorkspaceDirectory"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET ) end function register_workspace_directory( - DirectoryId, - EnableWorkDocs, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return workspaces( "RegisterWorkspaceDirectory", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "DirectoryId" => DirectoryId, "EnableWorkDocs" => EnableWorkDocs - ), - params, - ), - ); + params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -2993,6 +3155,36 @@ function start_workspaces( ) end +""" + start_workspaces_pool(pool_id) + start_workspaces_pool(pool_id, params::Dict{String,<:Any}) + +Starts the specified pool. You cannot start a pool unless it has a running mode of AutoStop +and a state of STOPPED. + +# Arguments +- `pool_id`: The identifier of the pool. 
+ +""" +function start_workspaces_pool(PoolId; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces( + "StartWorkspacesPool", + Dict{String,Any}("PoolId" => PoolId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_workspaces_pool( + PoolId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "StartWorkspacesPool", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("PoolId" => PoolId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ stop_workspaces(stop_workspace_requests) stop_workspaces(stop_workspace_requests, params::Dict{String,<:Any}) @@ -3033,6 +3225,36 @@ function stop_workspaces( ) end +""" + stop_workspaces_pool(pool_id) + stop_workspaces_pool(pool_id, params::Dict{String,<:Any}) + +Stops the specified pool. You cannot stop a WorkSpace pool unless it has a running mode of +AutoStop and a state of AVAILABLE, IMPAIRED, UNHEALTHY, or ERROR. + +# Arguments +- `pool_id`: The identifier of the pool. + +""" +function stop_workspaces_pool(PoolId; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces( + "StopWorkspacesPool", + Dict{String,Any}("PoolId" => PoolId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_workspaces_pool( + PoolId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "StopWorkspacesPool", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("PoolId" => PoolId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ terminate_workspaces(terminate_workspace_requests) terminate_workspaces(terminate_workspace_requests, params::Dict{String,<:Any}) @@ -3090,6 +3312,72 @@ function terminate_workspaces( ) end +""" + terminate_workspaces_pool(pool_id) + terminate_workspaces_pool(pool_id, params::Dict{String,<:Any}) + +Terminates the specified pool. 
+ +# Arguments +- `pool_id`: The identifier of the pool. + +""" +function terminate_workspaces_pool( + PoolId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "TerminateWorkspacesPool", + Dict{String,Any}("PoolId" => PoolId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function terminate_workspaces_pool( + PoolId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "TerminateWorkspacesPool", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("PoolId" => PoolId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + terminate_workspaces_pool_session(session_id) + terminate_workspaces_pool_session(session_id, params::Dict{String,<:Any}) + +Terminates the pool session. + +# Arguments +- `session_id`: The identifier of the pool session. + +""" +function terminate_workspaces_pool_session( + SessionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "TerminateWorkspacesPoolSession", + Dict{String,Any}("SessionId" => SessionId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function terminate_workspaces_pool_session( + SessionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "TerminateWorkspacesPoolSession", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("SessionId" => SessionId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_connect_client_add_in(add_in_id, resource_id) update_connect_client_add_in(add_in_id, resource_id, params::Dict{String,<:Any}) @@ -3334,3 +3622,40 @@ function update_workspace_image_permission( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_workspaces_pool(pool_id) + update_workspaces_pool(pool_id, params::Dict{String,<:Any}) + +Updates the specified pool. 
+ +# Arguments +- `pool_id`: The identifier of the specified pool to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ApplicationSettings"`: The persistent application settings for users in the pool. +- `"BundleId"`: The identifier of the bundle. +- `"Capacity"`: The desired capacity for the pool. +- `"Description"`: Describes the specified pool to update. +- `"DirectoryId"`: The identifier of the directory. +- `"TimeoutSettings"`: Indicates the timeout settings of the specified pool. +""" +function update_workspaces_pool(PoolId; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces( + "UpdateWorkspacesPool", + Dict{String,Any}("PoolId" => PoolId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_workspaces_pool( + PoolId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "UpdateWorkspacesPool", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("PoolId" => PoolId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end