diff --git a/src/AWSServices.jl b/src/AWSServices.jl index 259e8f8066..c184e9a912 100644 --- a/src/AWSServices.jl +++ b/src/AWSServices.jl @@ -558,7 +558,6 @@ const migrationhuborchestrator = AWS.RestJSONService( const migrationhubstrategy = AWS.RestJSONService( "migrationhub-strategy", "migrationhub-strategy", "2020-02-19" ) -const mobile = AWS.RestJSONService("AWSMobileHubService", "mobile", "2017-07-01") const mobile_analytics = AWS.RestJSONService( "mobileanalytics", "mobileanalytics", "2014-06-05" ) @@ -640,6 +639,7 @@ const privatenetworks = AWS.RestJSONService( "private-networks", "private-networks", "2021-12-03" ) const proton = AWS.JSONService("proton", "proton", "2020-07-20", "1.0", "AwsProton20200720") +const qapps = AWS.RestJSONService("qapps", "data.qapps", "2023-11-27") const qbusiness = AWS.RestJSONService("qbusiness", "qbusiness", "2023-11-27") const qconnect = AWS.RestJSONService("wisdom", "wisdom", "2020-10-19") const qldb = AWS.RestJSONService("qldb", "qldb", "2019-01-02") diff --git a/src/services/acm.jl b/src/services/acm.jl index 1e05ab2315..68d4b6e640 100644 --- a/src/services/acm.jl +++ b/src/services/acm.jl @@ -223,10 +223,12 @@ end get_certificate(certificate_arn) get_certificate(certificate_arn, params::Dict{String,<:Any}) -Retrieves an Amazon-issued certificate and its certificate chain. The chain consists of the -certificate of the issuing CA and the intermediate certificates of any other subordinate -CAs. All of the certificates are base64 encoded. You can use OpenSSL to decode the -certificates and inspect individual fields. +Retrieves a certificate and its certificate chain. The certificate may be either a public +or private certificate issued using the ACM RequestCertificate action, or a certificate +imported into ACM using the ImportCertificate action. The chain consists of the certificate +of the issuing CA and the intermediate certificates of any other subordinate CAs. All of +the certificates are base64 encoded. You can use OpenSSL to decode the certificates and +inspect individual fields. # Arguments - `certificate_arn`: String that contains a certificate ARN in the following format: @@ -271,23 +273,21 @@ ACM does not provide managed renewal for certificates that you import. Note the guidelines when importing third party certificates: You must enter the private key that matches the certificate you are importing. The private key must be unencrypted. You cannot import a private key that is protected by a password or a passphrase. The private -key must be no larger than 5 KB (5,120 bytes). If the certificate you are importing is -not self-signed, you must enter its certificate chain. If a certificate chain is -included, the issuer must be the subject of one of the certificates in the chain. The -certificate, private key, and certificate chain must be PEM-encoded. The current time -must be between the Not Before and Not After certificate fields. The Issuer field must -not be empty. The OCSP authority URL, if present, must not exceed 1000 characters. To -import a new certificate, omit the CertificateArn argument. Include this argument only when -you want to replace a previously imported certificate. When you import a certificate by -using the CLI, you must specify the certificate, the certificate chain, and the private key -by their file names preceded by fileb://. For example, you can specify a certificate saved -in the C:temp folder as fileb://C:tempcertificate_to_import.pem. 
If you are making an HTTP -or HTTPS Query request, include these arguments as BLOBs. When you import a certificate -by using an SDK, you must specify the certificate, the certificate chain, and the private -key files in the manner required by the programming language you're using. The -cryptographic algorithm of an imported certificate must match the algorithm of the signing -CA. For example, if the signing CA key type is RSA, then the certificate key type must also -be RSA. This operation returns the Amazon Resource Name (ARN) of the imported certificate. +key must be no larger than 5 KB (5,120 bytes). The certificate, private key, and +certificate chain must be PEM-encoded. The current time must be between the Not Before +and Not After certificate fields. The Issuer field must not be empty. The OCSP +authority URL, if present, must not exceed 1000 characters. To import a new certificate, +omit the CertificateArn argument. Include this argument only when you want to replace a +previously imported certificate. When you import a certificate by using the CLI, you must +specify the certificate, the certificate chain, and the private key by their file names +preceded by fileb://. For example, you can specify a certificate saved in the C:temp folder +as fileb://C:tempcertificate_to_import.pem. If you are making an HTTP or HTTPS Query +request, include these arguments as BLOBs. When you import a certificate by using an +SDK, you must specify the certificate, the certificate chain, and the private key files in +the manner required by the programming language you're using. The cryptographic +algorithm of an imported certificate must match the algorithm of the signing CA. For +example, if the signing CA key type is RSA, then the certificate key type must also be RSA. + This operation returns the Amazon Resource Name (ARN) of the imported certificate. # Arguments - `certificate`: The certificate to import. @@ -335,10 +335,12 @@ end list_certificates() list_certificates(params::Dict{String,<:Any}) -Retrieves a list of certificate ARNs and domain names. You can request that only -certificates that match a specific status be listed. You can also filter by specific -attributes of the certificate. Default filtering returns only RSA_2048 certificates. For -more information, see Filters. +Retrieves a list of certificate ARNs and domain names. By default, the API returns RSA_2048 +certificates. To return all certificates in the account, include the keyType filter with +the values [RSA_1024, RSA_2048, RSA_3072, RSA_4096, EC_prime256v1, EC_secp384r1, +EC_secp521r1]. In addition to keyType, you can also filter by the CertificateStatuses, +keyUsage, and extendedKeyUsage attributes on the certificate. For more information, see +Filters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -592,10 +594,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys certificate uses to encrypt data. RSA is the default key algorithm for ACM certificates. Elliptic Curve Digital Signature Algorithm (ECDSA) keys are smaller, offering security comparable to RSA keys but with greater computing efficiency. However, ECDSA is not - supported by all network clients. Some AWS services may require RSA keys, or only support - ECDSA keys of a particular size, while others allow the use of either RSA and ECDSA keys to - ensure that compatibility is not broken. Check the requirements for the AWS service where - you plan to deploy your certificate. 
Default: RSA_2048 + supported by all network clients. Some Amazon Web Services services may require RSA keys, + or only support ECDSA keys of a particular size, while others allow the use of either RSA + and ECDSA keys to ensure that compatibility is not broken. Check the requirements for the + Amazon Web Services service where you plan to deploy your certificate. For more information + about selecting an algorithm, see Key algorithms. Algorithms supported for an ACM + certificate request include: RSA_2048 EC_prime256v1 EC_secp384r1 Other + listed algorithms are for imported certificates only. When you request a private PKI + certificate signed by a CA from Amazon Web Services Private CA, the specified signing + algorithm family (RSA or ECDSA) must match the algorithm family of the CA's secret key. + Default: RSA_2048 - `"Options"`: Currently, you can use this parameter to specify whether to add the certificate to a certificate transparency log. Certificate transparency makes it possible to detect SSL/TLS certificates that have been mistakenly or maliciously issued. diff --git a/src/services/acm_pca.jl b/src/services/acm_pca.jl index c039eea54b..3767bc9303 100644 --- a/src/services/acm_pca.jl +++ b/src/services/acm_pca.jl @@ -702,7 +702,7 @@ a Policy for Cross-Account Access. # Arguments - `resource_arn`: The Amazon Resource Number (ARN) of the private CA that will have its policy retrieved. You can find the CA's ARN by calling the ListCertificateAuthorities - action. + action. </p> """ function get_policy(ResourceArn; aws_config::AbstractAWSConfig=global_aws_config()) @@ -754,15 +754,14 @@ certificate signed by the preceding subordinate CA must come next, and so on unt chain is built. The chain must be PEM-encoded. The maximum allowed size of a certificate is 32 KB. The maximum allowed size of a certificate chain is 2 MB. Enforcement of Critical Constraints Amazon Web Services Private CA allows the following -extensions to be marked critical in the imported CA certificate or chain. Authority key -identifier Basic constraints (must be marked critical) Certificate policies Extended -key usage Inhibit anyPolicy Issuer alternative name Key usage Name constraints -Policy mappings Subject alternative name Subject directory attributes Subject key -identifier Subject information access Amazon Web Services Private CA rejects the -following extensions when they are marked critical in an imported CA certificate or chain. - Authority information access CRL distribution points Freshest CRL Policy constraints - Amazon Web Services Private Certificate Authority will also reject any other extension -marked as critical not contained on the preceding list of allowed extensions. +extensions to be marked critical in the imported CA certificate or chain. Basic +constraints (must be marked critical) Subject alternative names Key usage Extended +key usage Authority key identifier Subject key identifier Issuer alternative name +Subject directory attributes Subject information access Certificate policies Policy +mappings Inhibit anyPolicy Amazon Web Services Private CA rejects the following +extensions when they are marked critical in an imported CA certificate or chain. Name +constraints Policy constraints CRL distribution points Authority information access +Freshest CRL Any other extension # Arguments - `certificate`: The PEM-encoded certificate for a private CA. 
This may be a self-signed diff --git a/src/services/application_auto_scaling.jl b/src/services/application_auto_scaling.jl index e50633819a..c57f3b09e8 100644 --- a/src/services/application_auto_scaling.jl +++ b/src/services/application_auto_scaling.jl @@ -19,7 +19,7 @@ scaling policy in the Application Auto Scaling User Guide. - `resource_id`: The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -51,26 +51,28 @@ scaling policy in the Application Auto Scaling User Guide. 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `scalable_dimension`: The scalable dimension. This string consists of the service - namespace, resource type, and scaling property. ecs:service:DesiredCount - The desired - task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The - instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The - target capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The desired capacity - of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write - capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The - provisioned write capacity for a DynamoDB global secondary index. - rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. - Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. - sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model - endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a - custom resource provided by your own application or service. + namespace, resource type, and scaling property. ecs:service:DesiredCount - The task + count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance + count of an EMR Instance Group. 
ec2:spot-fleet-request:TargetCapacity - The target + capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream + 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a + DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned + write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The + count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible + edition and Aurora PostgreSQL-compatible edition. sagemaker:variant:DesiredInstanceCount + - The number of EC2 instances for a SageMaker model endpoint variant. + custom-resource:ResourceType:Property - The scalable dimension for a custom resource + provided by your own application or service. comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint. comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units @@ -85,8 +87,10 @@ scaling policy in the Application Auto Scaling User Guide. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource instead. @@ -149,7 +153,7 @@ more information, see Delete a scheduled action in the Application Auto Scaling - `resource_id`: The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -181,26 +185,28 @@ more information, see Delete a scheduled action in the Application Auto Scaling 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. 
SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `scalable_dimension`: The scalable dimension. This string consists of the service - namespace, resource type, and scaling property. ecs:service:DesiredCount - The desired - task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The - instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The - target capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The desired capacity - of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write - capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The - provisioned write capacity for a DynamoDB global secondary index. - rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. - Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. - sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model - endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a - custom resource provided by your own application or service. + namespace, resource type, and scaling property. ecs:service:DesiredCount - The task + count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance + count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target + capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream + 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a + DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned + write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The + count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible + edition and Aurora PostgreSQL-compatible edition. sagemaker:variant:DesiredInstanceCount + - The number of EC2 instances for a SageMaker model endpoint variant. + custom-resource:ResourceType:Property - The scalable dimension for a custom resource + provided by your own application or service. comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint. comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units @@ -215,8 +221,10 @@ more information, see Delete a scheduled action in the Application Auto Scaling Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. 
sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `scheduled_action_name`: The name of the scheduled action. - `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource @@ -282,7 +290,7 @@ with it. - `resource_id`: The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -314,17 +322,19 @@ with it. 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `scalable_dimension`: The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. - ecs:service:DesiredCount - The desired task count of an ECS service. + ecs:service:DesiredCount - The task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet. - appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. + appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global @@ -349,8 +359,10 @@ with it. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. 
+ workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource instead. @@ -421,7 +433,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ResourceIds"`: The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -453,18 +465,20 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `"ScalableDimension"`: The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID. - ecs:service:DesiredCount - The desired task count of an ECS service. + ecs:service:DesiredCount - The task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet. - appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. + appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global @@ -489,8 +503,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. 
sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. """ function describe_scalable_targets( ServiceNamespace; aws_config::AbstractAWSConfig=global_aws_config() @@ -549,7 +565,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ResourceId"`: The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -581,20 +597,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `"ScalableDimension"`: The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you - must also specify a resource ID. ecs:service:DesiredCount - The desired task count of an - ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR + must also specify a resource ID. ecs:service:DesiredCount - The task count of an ECS + service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - Fleet. appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. - dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. - dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. - dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global + Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet. + dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. + dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. + dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index. 
dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora @@ -616,8 +634,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. """ function describe_scaling_activities( ServiceNamespace; aws_config::AbstractAWSConfig=global_aws_config() @@ -672,7 +692,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ResourceId"`: The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -704,20 +724,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `"ScalableDimension"`: The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you - must also specify a resource ID. ecs:service:DesiredCount - The desired task count of an - ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR + must also specify a resource ID. ecs:service:DesiredCount - The task count of an ECS + service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - Fleet. appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. 
- dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. - dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. - dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global + Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet. + dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. + dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. + dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora @@ -739,8 +761,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. """ function describe_scaling_policies( ServiceNamespace; aws_config::AbstractAWSConfig=global_aws_config() @@ -775,8 +799,8 @@ end Describes the Application Auto Scaling scheduled actions for the specified service namespace. You can filter the results using the ResourceId, ScalableDimension, and -ScheduledActionNames parameters. For more information, see Scheduled scaling and Managing -scheduled scaling in the Application Auto Scaling User Guide. +ScheduledActionNames parameters. For more information, see Scheduled scaling in the +Application Auto Scaling User Guide. # Arguments - `service_namespace`: The namespace of the Amazon Web Services service that provides the @@ -794,7 +818,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ResourceId"`: The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -826,20 +850,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. 
SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `"ScalableDimension"`: The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you - must also specify a resource ID. ecs:service:DesiredCount - The desired task count of an - ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR + must also specify a resource ID. ecs:service:DesiredCount - The task count of an ECS + service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - Fleet. appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. - dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. - dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. - dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global + Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet. + dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. + dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. + dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora @@ -861,8 +887,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `"ScheduledActionNames"`: The names of the scheduled actions to describe. """ function describe_scheduled_actions( @@ -897,8 +925,8 @@ end list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) Returns all the tags on the specified Application Auto Scaling scalable target. For general -information about tags, including the format and syntax, see Tagging Amazon Web Services -resources in the Amazon Web Services General Reference. +information about tags, including the format and syntax, see Tagging your Amazon Web +Services resources in the Amazon Web Services General Reference. # Arguments - `resource_arn`: Specify the ARN of the scalable target. 
For example: @@ -964,7 +992,7 @@ scaling policies that were specified for the scalable target are deleted. - `resource_id`: The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -996,26 +1024,28 @@ scaling policies that were specified for the scalable target are deleted. 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `scalable_dimension`: The scalable dimension. This string consists of the service - namespace, resource type, and scaling property. ecs:service:DesiredCount - The desired - task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The - instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The - target capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The desired capacity - of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write - capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The - provisioned write capacity for a DynamoDB global secondary index. - rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. - Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. - sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model - endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a - custom resource provided by your own application or service. + namespace, resource type, and scaling property. ecs:service:DesiredCount - The task + count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance + count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target + capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream + 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB table. 
dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a + DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned + write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The + count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible + edition and Aurora PostgreSQL-compatible edition. sagemaker:variant:DesiredInstanceCount + - The number of EC2 instances for a SageMaker model endpoint variant. + custom-resource:ResourceType:Property - The scalable dimension for a custom resource + provided by your own application or service. comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint. comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units @@ -1030,8 +1060,10 @@ scaling policies that were specified for the scalable target are deleted. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource instead. @@ -1116,7 +1148,7 @@ scheduled actions that were specified for the scalable target are deleted. - `resource_id`: The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -1148,26 +1180,28 @@ scheduled actions that were specified for the scalable target are deleted. 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. 
Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `scalable_dimension`: The scalable dimension. This string consists of the service - namespace, resource type, and scaling property. ecs:service:DesiredCount - The desired - task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The - instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The - target capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The desired capacity - of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write - capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The - provisioned write capacity for a DynamoDB global secondary index. - rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. - Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. - sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model - endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a - custom resource provided by your own application or service. + namespace, resource type, and scaling property. ecs:service:DesiredCount - The task + count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance + count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target + capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream + 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a + DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned + write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The + count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible + edition and Aurora PostgreSQL-compatible edition. sagemaker:variant:DesiredInstanceCount + - The number of EC2 instances for a SageMaker model endpoint variant. + custom-resource:ResourceType:Property - The scalable dimension for a custom resource + provided by your own application or service. comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint. comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units @@ -1182,8 +1216,10 @@ scheduled actions that were specified for the scalable target are deleted. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. 
- `scheduled_action_name`: The name of the scheduled action. This name must be unique among all other scheduled actions on the specified scalable target. - `service_namespace`: The namespace of the Amazon Web Services service that provides the @@ -1205,8 +1241,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys At and cron expressions use Universal Coordinated Time (UTC) by default. The cron format consists of six fields separated by white spaces: [Minutes] [Hours] [Day_of_Month] [Month] [Day_of_Week] [Year]. For rate expressions, value is a positive integer and unit is minute - | minutes | hour | hours | day | days. For more information and examples, see Example - scheduled actions for Application Auto Scaling in the Application Auto Scaling User Guide. + | minutes | hour | hours | day | days. For more information, see Schedule recurring scaling + actions using cron expressions in the Application Auto Scaling User Guide. - `"StartTime"`: The date and time for this scheduled action to start, in UTC. - `"Timezone"`: Specifies the time zone used when setting a scheduled action by using an at or cron expression. If a time zone is not provided, UTC is used by default. Valid values @@ -1294,7 +1330,7 @@ even if you don't include the MinCapacity or MaxCapacity request parameters. - `resource_id`: The identifier of the resource that is associated with the scalable target. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service - name. Example: service/default/sample-webapp. Spot Fleet - The resource type is + name. Example: service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -1326,17 +1362,19 @@ even if you don't include the MinCapacity or MaxCapacity request parameters. 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `scalable_dimension`: The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. - ecs:service:DesiredCount - The desired task count of an ECS service. + ecs:service:DesiredCount - The task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet. 
- appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. + appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global @@ -1361,8 +1399,10 @@ even if you don't include the MinCapacity or MaxCapacity request parameters. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource instead. @@ -1383,20 +1423,20 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys capacity limit in response to changing demand. This property is required when registering a new scalable target. For the following resources, the minimum value allowed is 0. AppStream 2.0 fleets Aurora DB clusters ECS services EMR clusters Lambda - provisioned concurrency SageMaker endpoint variants SageMaker Serverless endpoint - provisioned concurrency Spot Fleets custom resources It's strongly recommended that - you specify a value greater than 0. A value greater than 0 means that data points are - continuously reported to CloudWatch that scaling policies can use to scale on a metric like - average CPU utilization. For all other resources, the minimum allowed value depends on the - type of resource that you are using. If you provide a value that is lower than what a - resource can accept, an error occurs. In which case, the error message will provide the - minimum value that the resource can accept. + provisioned concurrency SageMaker endpoint variants SageMaker inference components + SageMaker serverless endpoint provisioned concurrency Spot Fleets custom resources + It's strongly recommended that you specify a value greater than 0. A value greater than 0 + means that data points are continuously reported to CloudWatch that scaling policies can + use to scale on a metric like average CPU utilization. For all other resources, the minimum + allowed value depends on the type of resource that you are using. If you provide a value + that is lower than what a resource can accept, an error occurs. In which case, the error + message will provide the minimum value that the resource can accept. - `"RoleARN"`: This parameter is required for services that do not support service-linked roles (such as Amazon EMR), and it must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf. If the service supports service-linked roles, Application Auto Scaling uses a service-linked role, which - it creates if it does not yet exist. For more information, see Application Auto Scaling IAM - roles. + it creates if it does not yet exist. 
For more information, see How Application Auto Scaling + works with IAM. - `"SuspendedState"`: An embedded object that contains attributes and attribute values that are used to suspend and resume automatic scaling. Setting the value of an attribute to true suspends the specified scaling activities. Setting it to false (default) resumes the @@ -1405,8 +1445,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are suspended. For DynamicScalingOutSuspended, while a suspension is in effect, all scale-out activities that are triggered by a scaling policy are suspended. For ScheduledScalingSuspended, while a suspension is in effect, all scaling activities that - involve scheduled actions are suspended. For more information, see Suspending and - resuming scaling in the Application Auto Scaling User Guide. + involve scheduled actions are suspended. For more information, see Suspend and resume + scaling in the Application Auto Scaling User Guide. - `"Tags"`: Assigns one or more tags to the scalable target. Use this parameter to tag the scalable target when it is created. To tag an existing scalable target, use the TagResource operation. Each tag consists of a tag key and a tag value. Both the tag key and the tag @@ -1466,10 +1506,10 @@ tag key and a tag value. To edit a tag, specify an existing tag key and a new ta You can use this operation to tag an Application Auto Scaling scalable target, but you cannot tag a scaling policy or scheduled action. You can also add tags to an Application Auto Scaling scalable target while creating it (RegisterScalableTarget). For general -information about tags, including the format and syntax, see Tagging Amazon Web Services -resources in the Amazon Web Services General Reference. Use tags to control access to a -scalable target. For more information, see Tagging support for Application Auto Scaling in -the Application Auto Scaling User Guide. +information about tags, including the format and syntax, see Tagging your Amazon Web +Services resources in the Amazon Web Services General Reference. Use tags to control access +to a scalable target. For more information, see Tagging support for Application Auto +Scaling in the Application Auto Scaling User Guide. # Arguments - `resource_arn`: Identifies the Application Auto Scaling scalable target that you want to @@ -1482,7 +1522,7 @@ the Application Auto Scaling User Guide. specify an existing tag key with a different tag value, Application Auto Scaling replaces the current tag value with the specified one. For information about the rules that apply to tag keys and tag values, see User-defined tag restrictions in the Amazon Web Services - Billing and Cost Management User Guide. + Billing User Guide. """ function tag_resource(ResourceARN, Tags; aws_config::AbstractAWSConfig=global_aws_config()) diff --git a/src/services/arc_zonal_shift.jl b/src/services/arc_zonal_shift.jl index 57a6c9300b..b038bb8081 100644 --- a/src/services/arc_zonal_shift.jl +++ b/src/services/arc_zonal_shift.jl @@ -48,8 +48,11 @@ autoshift. A practice run configuration includes specifications for blocked date blocked time windows, and for Amazon CloudWatch alarms that you create to use with practice runs. The alarms that you specify are an outcome alarm, to monitor application health during practice runs and, optionally, a blocking alarm, to block practice runs from -starting. 
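A minimal sketch of the CreatePracticeRunConfiguration call described here, assuming the `AWS.@service` bindings; the CloudWatch alarm and load balancer ARNs are placeholders, and the `type`/`alarmIdentifier` field names of the outcome alarm entry are assumptions about the control-condition shape:

```julia
using AWS
@service ARC_Zonal_Shift

# Placeholder alarm ARN; field names of the alarm entry are assumed.
outcome_alarms = [Dict(
    "type" => "CLOUDWATCH",
    "alarmIdentifier" => "arn:aws:cloudwatch:us-east-1:111122223333:alarm:my-app-health",
)]

ARC_Zonal_Shift.create_practice_run_configuration(
    outcome_alarms,
    "arn:aws:elasticloadbalancing:us-east-1:111122223333:loadbalancer/app/my-alb/abc123",
    # optional blockedDates, blockedWindows, and blockingAlarms can go in a params Dict
)
```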
For more information, see Considerations when you configure zonal autoshift in -the Amazon Route 53 Application Recovery Controller Developer Guide. +starting. When a resource has a practice run configuration, Route 53 ARC starts zonal +shifts for the resource weekly, to shift traffic for practice runs. Practice runs help you +to ensure that shifting away traffic from an Availability Zone during an autoshift is safe +for your application. For more information, see Considerations when you configure zonal +autoshift in the Amazon Route 53 Application Recovery Controller Developer Guide. # Arguments - `outcome_alarms`: The outcome alarm for practice runs is a required Amazon CloudWatch @@ -58,10 +61,10 @@ the Amazon Route 53 Application Recovery Controller Developer Guide. from an Availability Zone during each weekly practice run. You should configure the alarm to go into an ALARM state if your application is impacted by the zonal shift, and you want to stop the zonal shift, to let traffic for the resource return to the Availability Zone. -- `resource_identifier`: The identifier of the resource to shift away traffic for when a - practice run starts a zonal shift. The identifier is the Amazon Resource Name (ARN) for the - resource. At this time, supported resources are Network Load Balancers and Application Load - Balancers with cross-zone load balancing turned off. +- `resource_identifier`: The identifier of the resource that Amazon Web Services shifts + traffic for with a practice run zonal shift. The identifier is the Amazon Resource Name + (ARN) for the resource. At this time, supported resources are Network Load Balancers and + Application Load Balancers with cross-zone load balancing turned off. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -158,6 +161,42 @@ function delete_practice_run_configuration( ) end +""" + get_autoshift_observer_notification_status() + get_autoshift_observer_notification_status(params::Dict{String,<:Any}) + +Returns the status of autoshift observer notification. Autoshift observer notification +enables you to be notified, through Amazon EventBridge, when there is an autoshift event +for zonal autoshift. If the status is ENABLED, Route 53 ARC includes all autoshift events +when you use the EventBridge pattern Autoshift In Progress. When the status is DISABLED, +Route 53 ARC includes only autoshift events for autoshifts when one or more of your +resources is included in the autoshift. For more information, see Notifications for +practice runs and autoshifts in the Amazon Route 53 Application Recovery Controller +Developer Guide. + +""" +function get_autoshift_observer_notification_status(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return arc_zonal_shift( + "GET", + "/autoshift-observer-notification"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_autoshift_observer_notification_status( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return arc_zonal_shift( + "GET", + "/autoshift-observer-notification", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_managed_resource(resource_identifier) get_managed_resource(resource_identifier, params::Dict{String,<:Any}) @@ -170,10 +209,10 @@ start a zonal shift or configure zonal autoshift for Network Load Balancers and Load Balancers with cross-zone load balancing turned off. 
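The new notification-status getter takes no arguments; a short sketch, assuming the `AWS.@service` bindings and that the response carries a `status` field:

```julia
using AWS
@service ARC_Zonal_Shift

resp = ARC_Zonal_Shift.get_autoshift_observer_notification_status()
println(get(resp, "status", nothing))   # ENABLED or DISABLED; response field name assumed
```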
# Arguments -- `resource_identifier`: The identifier for the resource to shift away traffic for. The - identifier is the Amazon Resource Name (ARN) for the resource. At this time, supported - resources are Network Load Balancers and Application Load Balancers with cross-zone load - balancing turned off. +- `resource_identifier`: The identifier for the resource that Amazon Web Services shifts + traffic for. The identifier is the Amazon Resource Name (ARN) for the resource. At this + time, supported resources are Network Load Balancers and Application Load Balancers with + cross-zone load balancing turned off. """ function get_managed_resource( @@ -204,7 +243,9 @@ end list_autoshifts() list_autoshifts(params::Dict{String,<:Any}) -Returns the active autoshifts for a specified resource. +Returns a list of autoshifts for an Amazon Web Services Region. By default, the call +returns only ACTIVE autoshifts. Optionally, you can specify the status parameter to return +COMPLETED autoshifts. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -269,9 +310,9 @@ end Lists all active and completed zonal shifts in Amazon Route 53 Application Recovery Controller in your Amazon Web Services account in this Amazon Web Services Region. -ListZonalShifts returns customer-started zonal shifts, as well as practice run zonal shifts -that Route 53 ARC started on your behalf for zonal autoshift. The ListZonalShifts operation -does not list autoshifts. For more information about listing autoshifts, see +ListZonalShifts returns customer-initiated zonal shifts, as well as practice run zonal +shifts that Route 53 ARC started on your behalf for zonal autoshift. The ListZonalShifts +operation does not list autoshifts. For more information about listing autoshifts, see \">ListAutoshifts. # Optional Parameters @@ -324,9 +365,10 @@ Availability Zone to complete. For more information, see Zonal shift in the Amaz Application Recovery Controller Developer Guide. # Arguments -- `away_from`: The Availability Zone that traffic is moved away from for a resource when - you start a zonal shift. Until the zonal shift expires or you cancel it, traffic for the - resource is instead moved to other Availability Zones in the Amazon Web Services Region. +- `away_from`: The Availability Zone (for example, use1-az1) that traffic is moved away + from for a resource when you start a zonal shift. Until the zonal shift expires or you + cancel it, traffic for the resource is instead moved to other Availability Zones in the + Amazon Web Services Region. - `comment`: A comment that you enter about the zonal shift. Only the latest comment is retained; no comment history is maintained. A new comment overwrites any existing comment string. @@ -340,10 +382,10 @@ Application Recovery Controller Developer Guide. A lowercase letter m: To specify that the value is in minutes. A lowercase letter h: To specify that the value is in hours. For example: 20h means the zonal shift expires in 20 hours. 120m means the zonal shift expires in 120 minutes (2 hours). -- `resource_identifier`: The identifier for the resource to shift away traffic for. The - identifier is the Amazon Resource Name (ARN) for the resource. At this time, supported - resources are Network Load Balancers and Application Load Balancers with cross-zone load - balancing turned off. +- `resource_identifier`: The identifier for the resource that Amazon Web Services shifts + traffic for. 
The identifier is the Amazon Resource Name (ARN) for the resource. At this
+  time, supported resources are Network Load Balancers and Application Load Balancers with
+  cross-zone load balancing turned off.
 
 """
 function start_zonal_shift(
@@ -394,6 +436,50 @@ function start_zonal_shift(
     )
 end
 
+"""
+    update_autoshift_observer_notification_status(status)
+    update_autoshift_observer_notification_status(status, params::Dict{String,<:Any})
+
+Update the status of autoshift observer notification. Autoshift observer notification
+enables you to be notified, through Amazon EventBridge, when there is an autoshift event
+for zonal autoshift. If the status is ENABLED, Route 53 ARC includes all autoshift events
+when you use the EventBridge pattern Autoshift In Progress. When the status is DISABLED,
+Route 53 ARC includes only autoshift events for autoshifts when one or more of your
+resources is included in the autoshift. For more information, see Notifications for
+practice runs and autoshifts in the Amazon Route 53 Application Recovery Controller
+Developer Guide.
+
+# Arguments
+- `status`: The status to set for autoshift observer notification. If the status is
+  ENABLED, Route 53 ARC includes all autoshift events when you use the Amazon EventBridge
+  pattern Autoshift In Progress. When the status is DISABLED, Route 53 ARC includes only
+  autoshift events for autoshifts when one or more of your resources is included in the
+  autoshift.
+
+"""
+function update_autoshift_observer_notification_status(
+    status; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return arc_zonal_shift(
+        "PUT",
+        "/autoshift-observer-notification",
+        Dict{String,Any}("status" => status);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function update_autoshift_observer_notification_status(
+    status, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return arc_zonal_shift(
+        "PUT",
+        "/autoshift-observer-notification",
+        Dict{String,Any}(mergewith(_merge, Dict{String,Any}("status" => status), params));
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     update_practice_run_configuration(resource_identifier)
     update_practice_run_configuration(resource_identifier, params::Dict{String,<:Any})
@@ -457,17 +543,25 @@ end
     update_zonal_autoshift_configuration(resource_identifier, zonal_autoshift_status)
     update_zonal_autoshift_configuration(resource_identifier, zonal_autoshift_status, params::Dict{String,<:Any})
 
-You can update the zonal autoshift status for a resource, to enable or disable zonal
-autoshift. When zonal autoshift is ENABLED, Amazon Web Services shifts away resource
-traffic from an Availability Zone, on your behalf, when Amazon Web Services determines that
-there's an issue in the Availability Zone that could potentially affect customers.
+The zonal autoshift configuration for a resource includes the practice run configuration
+and the status for running autoshifts, zonal autoshift status. When a resource has a
+practice run configuration, Route 53 ARC starts weekly zonal shifts for the resource, to
+shift traffic away from an Availability Zone. Weekly practice runs help you to make sure
+that your application can continue to operate normally with the loss of one Availability
+Zone. You can update the zonal autoshift status to enable or disable zonal
+autoshift. 
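A sketch of the zonal shift and zonal autoshift calls documented in this file, assuming the `AWS.@service` bindings; the zone name and NLB ARN are placeholders:

```julia
using AWS
@service ARC_Zonal_Shift

nlb_arn = "arn:aws:elasticloadbalancing:us-east-1:111122223333:loadbalancer/net/my-nlb/abc123"

# Shift traffic away from one zone for two hours (expires_in format per the docstring above).
ARC_Zonal_Shift.start_zonal_shift(
    "use1-az1",                                      # away_from
    "Shifting away while we validate a deployment",  # comment
    "2h",                                            # expires_in
    nlb_arn,                                         # resource_identifier
)

# Authorize Amazon Web Services to start autoshifts for the same resource.
ARC_Zonal_Shift.update_zonal_autoshift_configuration(nlb_arn, "ENABLED")
```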
When zonal autoshift is ENABLED, you authorize Amazon Web Services to shift away +resource traffic for an application from an Availability Zone during events, on your +behalf, to help reduce time to recovery. Traffic is also shifted away for the required +weekly practice runs. # Arguments - `resource_identifier`: The identifier for the resource that you want to update the zonal autoshift configuration for. The identifier is the Amazon Resource Name (ARN) for the resource. - `zonal_autoshift_status`: The zonal autoshift status for the resource that you want to - update the zonal autoshift configuration for. + update the zonal autoshift configuration for. Choose ENABLED to authorize Amazon Web + Services to shift away resource traffic for an application from an Availability Zone during + events, on your behalf, to help reduce time to recovery. """ function update_zonal_autoshift_configuration( diff --git a/src/services/bedrock.jl b/src/services/bedrock.jl index d4d5f6aad1..3ddae2c6ff 100644 --- a/src/services/bedrock.jl +++ b/src/services/bedrock.jl @@ -98,22 +98,20 @@ end create_guardrail(blocked_input_messaging, blocked_outputs_messaging, name) create_guardrail(blocked_input_messaging, blocked_outputs_messaging, name, params::Dict{String,<:Any}) -Creates a guardrail to block topics and to filter out harmful content. Specify a name and -optional description. Specify messages for when the guardrail successfully blocks a -prompt or a model response in the blockedInputMessaging and blockedOutputsMessaging fields. - Specify topics for the guardrail to deny in the topicPolicyConfig object. Each -GuardrailTopicConfig object in the topicsConfig list pertains to one topic. Give a name -and description so that the guardrail can properly identify the topic. Specify DENY in -the type field. (Optional) Provide up to five prompts that you would categorize as -belonging to the topic in the examples list. Specify filter strengths for the harmful -categories defined in Amazon Bedrock in the contentPolicyConfig object. Each -GuardrailContentFilterConfig object in the filtersConfig list pertains to a harmful -category. For more information, see Content filters. For more information about the fields -in a content filter, see GuardrailContentFilterConfig. Specify the category in the type -field. Specify the strength of the filter for prompts in the inputStrength field and for -model responses in the strength field of the GuardrailContentFilterConfig. (Optional) -For security, include the ARN of a KMS key in the kmsKeyId field. (Optional) Attach any -tags to the guardrail in the tags object. For more information, see Tag resources. +Creates a guardrail to block topics and to implement safeguards for your generative AI +applications. You can configure the following policies in a guardrail to avoid undesirable +and harmful content, filter out denied topics and words, and remove sensitive information +for privacy protection. Content filters - Adjust filter strengths to block input prompts +or model responses containing harmful content. Denied topics - Define a set of topics +that are undesirable in the context of your application. These topics will be blocked if +detected in user queries or model responses. Word filters - Configure filters to block +undesirable words, phrases, and profanity. Such words can include offensive terms, +competitor names etc. 
Sensitive information filters - Block or mask sensitive +information such as personally identifiable information (PII) or custom regex in user +inputs and model responses. In addition to the above policies, you can also configure the +messages to be returned to the user if a user input or model response is in violation of +the policies defined in the guardrail. For more information, see Guardrails for Amazon +Bedrock in the Amazon Bedrock User Guide. # Arguments - `blocked_input_messaging`: The message to return when the guardrail blocks a prompt. @@ -128,6 +126,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency in the Amazon S3 User Guide. - `"contentPolicyConfig"`: The content filter policies to configure for the guardrail. +- `"contextualGroundingPolicyConfig"`: The contextual grounding policy configuration used + to create a guardrail. - `"description"`: A description of the guardrail. - `"kmsKeyId"`: The ARN of the KMS key that you use to encrypt the guardrail. - `"sensitiveInformationPolicyConfig"`: The sensitive information policy to configure for @@ -191,7 +191,8 @@ you are satisfied with a configuration, or to compare the configuration with ano version. # Arguments -- `guardrail_identifier`: The unique identifier of the guardrail. +- `guardrail_identifier`: The unique identifier of the guardrail. This can be an ID or the + ARN. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -461,7 +462,8 @@ guardrailIdentifier field. If you delete a guardrail, all of its versions will b guardrailIdentifier field and the version in the guardrailVersion field. # Arguments -- `guardrail_identifier`: The unique identifier of the guardrail. +- `guardrail_identifier`: The unique identifier of the guardrail. This can be an ID or the + ARN. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -670,6 +672,7 @@ details for the DRAFT version. # Arguments - `guardrail_identifier`: The unique identifier of the guardrail for which to get details. + This can be an ID or the ARN. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -924,7 +927,8 @@ another ListGuardrails request to see the next batch of results. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"guardrailIdentifier"`: The unique identifier of the guardrail. +- `"guardrailIdentifier"`: The unique identifier of the guardrail. This can be an ID or the + ARN. - `"maxResults"`: The maximum number of results to return in the response. - `"nextToken"`: If there are more results than were returned in the response, the response returns a nextToken that you can send in another ListGuardrails request to see the next @@ -1282,19 +1286,21 @@ filters. For more information about the fields in a content filter, see GuardrailContentFilterConfig. Specify the category in the type field. Specify the strength of the filter for prompts in the inputStrength field and for model responses in the strength field of the GuardrailContentFilterConfig. (Optional) For security, -include the ARN of a KMS key in the kmsKeyId field. (Optional) Attach any tags to the -guardrail in the tags object. For more information, see Tag resources. +include the ARN of a KMS key in the kmsKeyId field. 
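A sketch of CreateGuardrail with a single denied topic, assuming the `AWS.@service` bindings; the nested `topicPolicyConfig` field names are assumptions about the Bedrock guardrail shapes and are illustrative rather than exhaustive:

```julia
using AWS
@service Bedrock

params = Dict{String,Any}(
    "description" => "Blocks personalized investment advice",
    # topicsConfig/name/definition/type are assumed field names; content, word, and
    # sensitive-information policies are omitted from this sketch.
    "topicPolicyConfig" => Dict(
        "topicsConfig" => [Dict(
            "name"       => "Investment advice",
            "definition" => "Recommendations about specific financial products or portfolios.",
            "type"       => "DENY",
        )],
    ),
)

Bedrock.create_guardrail(
    "Sorry, I can't help with that request.",            # blocked_input_messaging
    "The model response was blocked by the guardrail.",  # blocked_outputs_messaging
    "my-guardrail",                                       # name
    params,
)
```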
# Arguments - `blocked_input_messaging`: The message to return when the guardrail blocks a prompt. - `blocked_outputs_messaging`: The message to return when the guardrail blocks a model response. -- `guardrail_identifier`: The unique identifier of the guardrail +- `guardrail_identifier`: The unique identifier of the guardrail. This can be an ID or the + ARN. - `name`: A name for the guardrail. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"contentPolicyConfig"`: The content policy to configure for the guardrail. +- `"contextualGroundingPolicyConfig"`: The contextual grounding policy configuration used + to update a guardrail. - `"description"`: A description of the guardrail. - `"kmsKeyId"`: The ARN of the KMS key with which to encrypt the guardrail. - `"sensitiveInformationPolicyConfig"`: The sensitive information policy to configure for diff --git a/src/services/bedrock_agent.jl b/src/services/bedrock_agent.jl index 5e087e7dd7..4276725649 100644 --- a/src/services/bedrock_agent.jl +++ b/src/services/bedrock_agent.jl @@ -79,8 +79,10 @@ Resource Name (ARN) of the role with permissions to invoke API operations on an (Optional) customerEncryptionKeyArn – The Amazon Resource Name (ARN) of a KMS key to encrypt the creation of the agent. (Optional) idleSessionTTLinSeconds – Specify the number of seconds for which the agent should maintain session information. After this time -expires, the subsequent InvokeAgent request begins a new session. To override the -default prompt behavior for agent orchestration and to use advanced prompts, include a +expires, the subsequent InvokeAgent request begins a new session. To enable your agent +to retain conversational context across multiple sessions, include a memoryConfiguration +object. For more information, see Configure memory. To override the default prompt +behavior for agent orchestration and to use advanced prompts, include a promptOverrideConfiguration object. For more information, see Advanced prompts. If you agent fails to be created, the response returns a list of failureReasons alongside a list of recommendedActions for you to troubleshoot. @@ -109,6 +111,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys expires and Amazon Bedrock deletes any data provided before the timeout. - `"instruction"`: Instructions that tell the agent what it should do and how it should interact with users. +- `"memoryConfiguration"`: Contains the details of the memory configured for the agent. - `"promptOverrideConfiguration"`: Contains configurations to override prompts in different parts of an agent sequence. For more information, see Advanced prompts. - `"tags"`: Any tags that you want to attach to the agent. @@ -152,11 +155,13 @@ Creates an action group for an agent. An action group represents the actions tha can carry out for the customer by defining the APIs that an agent can call and the logic for calling them. To allow your agent to request the user for additional information when trying to complete a task, add an action group with the parentActionGroupSignature field -set to AMAZON.UserInput. You must leave the description, apiSchema, and actionGroupExecutor -fields blank for this action group. 
During orchestration, if your agent determines that it -needs to invoke an API in an action group, but doesn't have enough information to complete -the API request, it will invoke this action group instead and return an Observation -reprompting the user for more information. +set to AMAZON.UserInput. To allow your agent to generate, run, and troubleshoot code when +trying to complete a task, add an action group with the parentActionGroupSignature field +set to AMAZON.CodeInterpreter. You must leave the description, apiSchema, and +actionGroupExecutor fields blank for this action group. During orchestration, if your agent +determines that it needs to invoke an API in an action group, but doesn't have enough +information to complete the API request, it will invoke this action group instead and +return an Observation reprompting the user for more information. # Arguments - `action_group_name`: The name to give the action group. @@ -183,10 +188,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"parentActionGroupSignature"`: To allow your agent to request the user for additional information when trying to complete a task, set this field to AMAZON.UserInput. You must leave the description, apiSchema, and actionGroupExecutor fields blank for this action - group. During orchestration, if your agent determines that it needs to invoke an API in an - action group, but doesn't have enough information to complete the API request, it will - invoke this action group instead and return an Observation reprompting the user for more - information. + group. To allow your agent to generate, run, and troubleshoot code when trying to complete + a task, set this field to AMAZON.CodeInterpreter. You must leave the description, + apiSchema, and actionGroupExecutor fields blank for this action group. During + orchestration, if your agent determines that it needs to invoke an API in an action group, + but doesn't have enough information to complete the API request, it will invoke this action + group instead and return an Observation reprompting the user for more information. """ function create_agent_action_group( actionGroupName, @@ -288,11 +295,11 @@ end create_data_source(data_source_configuration, knowledge_base_id, name) create_data_source(data_source_configuration, knowledge_base_id, name, params::Dict{String,<:Any}) -Sets up a data source to be added to a knowledge base. You can't change the -chunkingConfiguration after you create the data source. +Creates a data source connector for a knowledge base. You can't change the +chunkingConfiguration after you create the data source connector. # Arguments -- `data_source_configuration`: Contains metadata about where the data source is stored. +- `data_source_configuration`: The connection configuration for the data source. - `knowledge_base_id`: The unique identifier of the knowledge base to which to add the data source. - `name`: The name of the data source. @@ -303,7 +310,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency. -- `"dataDeletionPolicy"`: The data deletion policy assigned to the data source. +- `"dataDeletionPolicy"`: The data deletion policy for the data source. 
You can set the + data deletion policy to: DELETE: Deletes all underlying data belonging to the data source + from the vector store upon deletion of a knowledge base or data source resource. Note that + the vector store itself is not deleted, only the underlying data. This flag is ignored if + an Amazon Web Services account is deleted. RETAIN: Retains all underlying data in your + vector store upon deletion of a knowledge base or data source resource. - `"description"`: A description of the data source. - `"serverSideEncryptionConfiguration"`: Contains details about the server-side encryption for the data source. @@ -354,6 +366,185 @@ function create_data_source( ) end +""" + create_flow(execution_role_arn, name) + create_flow(execution_role_arn, name, params::Dict{String,<:Any}) + +Creates a prompt flow that you can use to send an input through various steps to yield an +output. Configure nodes, each of which corresponds to a step of the flow, and create +connections between the nodes to create paths to different outputs. For more information, +see How it works and Create a flow in Amazon Bedrock in the Amazon Bedrock User Guide. + +# Arguments +- `execution_role_arn`: The Amazon Resource Name (ARN) of the service role with permissions + to create and manage a flow. For more information, see Create a service role for flows in + Amazon Bedrock in the Amazon Bedrock User Guide. +- `name`: A name for the flow. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier to ensure that the API request + completes no more than one time. If this token matches a previous request, Amazon Bedrock + ignores the request, but does not return an error. For more information, see Ensuring + idempotency. +- `"customerEncryptionKeyArn"`: The Amazon Resource Name (ARN) of the KMS key to encrypt + the flow. +- `"definition"`: A definition of the nodes and connections between nodes in the flow. +- `"description"`: A description for the flow. +- `"tags"`: Any tags that you want to attach to the flow. For more information, see Tagging + resources in Amazon Bedrock. +""" +function create_flow( + executionRoleArn, name; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "POST", + "/flows/", + Dict{String,Any}( + "executionRoleArn" => executionRoleArn, + "name" => name, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_flow( + executionRoleArn, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "POST", + "/flows/", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "executionRoleArn" => executionRoleArn, + "name" => name, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_flow_alias(flow_identifier, name, routing_configuration) + create_flow_alias(flow_identifier, name, routing_configuration, params::Dict{String,<:Any}) + +Creates an alias of a flow for deployment. For more information, see Deploy a flow in +Amazon Bedrock in the Amazon Bedrock User Guide. + +# Arguments +- `flow_identifier`: The unique identifier of the flow for which to create an alias. +- `name`: A name for the alias. +- `routing_configuration`: Contains information about the version to which to map the alias. 
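A sketch of the new flow lifecycle (create, version, alias), assuming the `AWS.@service` bindings; the role ARN is a placeholder, and the `id`, `version`, and `flowVersion` field names are assumptions about the response and routing-configuration shapes. A real flow also needs a definition and a PrepareFlow call before it is usable:

```julia
using AWS
@service Bedrock_Agent

flow = Bedrock_Agent.create_flow(
    "arn:aws:iam::111122223333:role/service-role/my-flow-role",  # execution_role_arn
    "my-flow",                                                   # name
)
flow_id = flow["id"]                      # response field name assumed

version = Bedrock_Agent.create_flow_version(flow_id)

Bedrock_Agent.create_flow_alias(
    flow_id,
    "live",
    [Dict("flowVersion" => version["version"])],   # routing entry field names assumed
)
```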
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier to ensure that the API request + completes no more than one time. If this token matches a previous request, Amazon Bedrock + ignores the request, but does not return an error. For more information, see Ensuring + idempotency. +- `"description"`: A description for the alias. +- `"tags"`: Any tags that you want to attach to the alias of the flow. For more + information, see Tagging resources in Amazon Bedrock. +""" +function create_flow_alias( + flowIdentifier, + name, + routingConfiguration; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "POST", + "/flows/$(flowIdentifier)/aliases", + Dict{String,Any}( + "name" => name, + "routingConfiguration" => routingConfiguration, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_flow_alias( + flowIdentifier, + name, + routingConfiguration, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "POST", + "/flows/$(flowIdentifier)/aliases", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "name" => name, + "routingConfiguration" => routingConfiguration, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_flow_version(flow_identifier) + create_flow_version(flow_identifier, params::Dict{String,<:Any}) + +Creates a version of the flow that you can deploy. For more information, see Deploy a flow +in Amazon Bedrock in the Amazon Bedrock User Guide. + +# Arguments +- `flow_identifier`: The unique identifier of the flow that you want to create a version of. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier to ensure that the API request + completes no more than one time. If this token matches a previous request, Amazon Bedrock + ignores the request, but does not return an error. For more information, see Ensuring + idempotency. +- `"description"`: A description of the version of the flow. +""" +function create_flow_version( + flowIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "POST", + "/flows/$(flowIdentifier)/versions", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_flow_version( + flowIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "POST", + "/flows/$(flowIdentifier)/versions", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_knowledge_base(knowledge_base_configuration, name, role_arn, storage_configuration) create_knowledge_base(knowledge_base_configuration, name, role_arn, storage_configuration, params::Dict{String,<:Any}) @@ -445,6 +636,108 @@ function create_knowledge_base( ) end +""" + create_prompt(name) + create_prompt(name, params::Dict{String,<:Any}) + +Creates a prompt in your prompt library that you can add to a flow. 
For more information, +see Prompt management in Amazon Bedrock, Create a prompt using Prompt management and Prompt +flows in Amazon Bedrock in the Amazon Bedrock User Guide. + +# Arguments +- `name`: A name for the prompt. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier to ensure that the API request + completes no more than one time. If this token matches a previous request, Amazon Bedrock + ignores the request, but does not return an error. For more information, see Ensuring + idempotency. +- `"customerEncryptionKeyArn"`: The Amazon Resource Name (ARN) of the KMS key to encrypt + the prompt. +- `"defaultVariant"`: The name of the default variant for the prompt. This value must match + the name field in the relevant PromptVariant object. +- `"description"`: A description for the prompt. +- `"tags"`: Any tags that you want to attach to the prompt. For more information, see + Tagging resources in Amazon Bedrock. +- `"variants"`: A list of objects, each containing details about a variant of the prompt. +""" +function create_prompt(name; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "POST", + "/prompts/", + Dict{String,Any}("name" => name, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_prompt( + name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "POST", + "/prompts/", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("name" => name, "clientToken" => string(uuid4())), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_prompt_version(prompt_identifier) + create_prompt_version(prompt_identifier, params::Dict{String,<:Any}) + +Creates a static snapshot of your prompt that can be deployed to production. For more +information, see Deploy prompts using Prompt management by creating versions in the Amazon +Bedrock User Guide. + +# Arguments +- `prompt_identifier`: The unique identifier of the prompt that you want to create a + version of. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier to ensure that the API request + completes no more than one time. If this token matches a previous request, Amazon Bedrock + ignores the request, but does not return an error. For more information, see Ensuring + idempotency. +- `"description"`: A description for the version of the prompt. +- `"tags"`: Any tags that you want to attach to the version of the prompt. For more + information, see Tagging resources in Amazon Bedrock. 
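A sketch of creating a prompt and snapshotting it as a version, assuming the `AWS.@service` bindings and that the CreatePrompt response carries an `id` field:

```julia
using AWS
@service Bedrock_Agent

prompt = Bedrock_Agent.create_prompt("my-prompt")
Bedrock_Agent.create_prompt_version(
    prompt["id"],                                               # response field name assumed
    Dict{String,Any}("description" => "First production snapshot"),
)
```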
+""" +function create_prompt_version( + promptIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "POST", + "/prompts/$(promptIdentifier)/versions", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_prompt_version( + promptIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "POST", + "/prompts/$(promptIdentifier)/versions", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_agent(agent_id) delete_agent(agent_id, params::Dict{String,<:Any}) @@ -637,6 +930,120 @@ function delete_data_source( ) end +""" + delete_flow(flow_identifier) + delete_flow(flow_identifier, params::Dict{String,<:Any}) + +Deletes a flow. + +# Arguments +- `flow_identifier`: The unique identifier of the flow. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"skipResourceInUseCheck"`: By default, this value is false and deletion is stopped if + the resource is in use. If you set it to true, the resource will be deleted even if the + resource is in use. +""" +function delete_flow(flowIdentifier; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "DELETE", + "/flows/$(flowIdentifier)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_flow( + flowIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "DELETE", + "/flows/$(flowIdentifier)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_flow_alias(alias_identifier, flow_identifier) + delete_flow_alias(alias_identifier, flow_identifier, params::Dict{String,<:Any}) + +Deletes an alias of a flow. + +# Arguments +- `alias_identifier`: The unique identifier of the alias to be deleted. +- `flow_identifier`: The unique identifier of the flow that the alias belongs to. + +""" +function delete_flow_alias( + aliasIdentifier, flowIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "DELETE", + "/flows/$(flowIdentifier)/aliases/$(aliasIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_flow_alias( + aliasIdentifier, + flowIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "DELETE", + "/flows/$(flowIdentifier)/aliases/$(aliasIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_flow_version(flow_identifier, flow_version) + delete_flow_version(flow_identifier, flow_version, params::Dict{String,<:Any}) + +Deletes a version of a flow. + +# Arguments +- `flow_identifier`: The unique identifier of the flow whose version that you want to delete +- `flow_version`: The version of the flow that you want to delete. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"skipResourceInUseCheck"`: By default, this value is false and deletion is stopped if + the resource is in use. If you set it to true, the resource will be deleted even if the + resource is in use. 
+""" +function delete_flow_version( + flowIdentifier, flowVersion; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "DELETE", + "/flows/$(flowIdentifier)/versions/$(flowVersion)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_flow_version( + flowIdentifier, + flowVersion, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "DELETE", + "/flows/$(flowIdentifier)/versions/$(flowVersion)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_knowledge_base(knowledge_base_id) delete_knowledge_base(knowledge_base_id, params::Dict{String,<:Any}) @@ -673,6 +1080,43 @@ function delete_knowledge_base( ) end +""" + delete_prompt(prompt_identifier) + delete_prompt(prompt_identifier, params::Dict{String,<:Any}) + +Deletes a prompt or a prompt version from the Prompt management tool. For more information, +see Delete prompts from the Prompt management tool and Delete a version of a prompt from +the Prompt management tool in the Amazon Bedrock User Guide. + +# Arguments +- `prompt_identifier`: The unique identifier of the prompt. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"promptVersion"`: The version of the prompt to delete. +""" +function delete_prompt(promptIdentifier; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "DELETE", + "/prompts/$(promptIdentifier)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_prompt( + promptIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "DELETE", + "/prompts/$(promptIdentifier)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ disassociate_agent_knowledge_base(agent_id, agent_version, knowledge_base_id) disassociate_agent_knowledge_base(agent_id, agent_version, knowledge_base_id, params::Dict{String,<:Any}) @@ -926,7 +1370,114 @@ function get_data_source( ) return bedrock_agent( "GET", - "/knowledgebases/$(knowledgeBaseId)/datasources/$(dataSourceId)", + "/knowledgebases/$(knowledgeBaseId)/datasources/$(dataSourceId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_flow(flow_identifier) + get_flow(flow_identifier, params::Dict{String,<:Any}) + +Retrieves information about a flow. For more information, see Manage a flow in Amazon +Bedrock in the Amazon Bedrock User Guide. + +# Arguments +- `flow_identifier`: The unique identifier of the flow. + +""" +function get_flow(flowIdentifier; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "GET", + "/flows/$(flowIdentifier)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_flow( + flowIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "GET", + "/flows/$(flowIdentifier)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_flow_alias(alias_identifier, flow_identifier) + get_flow_alias(alias_identifier, flow_identifier, params::Dict{String,<:Any}) + +Retrieves information about a flow. For more information, see Deploy a flow in Amazon +Bedrock in the Amazon Bedrock User Guide. + +# Arguments +- `alias_identifier`: The unique identifier of the alias for which to retrieve information. 
+- `flow_identifier`: The unique identifier of the flow that the alias belongs to. + +""" +function get_flow_alias( + aliasIdentifier, flowIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "GET", + "/flows/$(flowIdentifier)/aliases/$(aliasIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_flow_alias( + aliasIdentifier, + flowIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "GET", + "/flows/$(flowIdentifier)/aliases/$(aliasIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_flow_version(flow_identifier, flow_version) + get_flow_version(flow_identifier, flow_version, params::Dict{String,<:Any}) + +Retrieves information about a version of a flow. For more information, see Deploy a flow in +Amazon Bedrock in the Amazon Bedrock User Guide. + +# Arguments +- `flow_identifier`: The unique identifier of the flow for which to get information. +- `flow_version`: The version of the flow for which to get information. + +""" +function get_flow_version( + flowIdentifier, flowVersion; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "GET", + "/flows/$(flowIdentifier)/versions/$(flowVersion)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_flow_version( + flowIdentifier, + flowVersion, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "GET", + "/flows/$(flowIdentifier)/versions/$(flowVersion)/", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1010,6 +1561,43 @@ function get_knowledge_base( ) end +""" + get_prompt(prompt_identifier) + get_prompt(prompt_identifier, params::Dict{String,<:Any}) + +Retrieves information about a prompt or a version of it. For more information, see View +information about prompts using Prompt management and View information about a version of +your prompt in the Amazon Bedrock User Guide. + +# Arguments +- `prompt_identifier`: The unique identifier of the prompt. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"promptVersion"`: The version of the prompt about which you want to retrieve information. +""" +function get_prompt(promptIdentifier; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "GET", + "/prompts/$(promptIdentifier)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_prompt( + promptIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "GET", + "/prompts/$(promptIdentifier)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_agent_action_groups(agent_id, agent_version) list_agent_action_groups(agent_id, agent_version, params::Dict{String,<:Any}) @@ -1247,6 +1835,120 @@ function list_data_sources( ) end +""" + list_flow_aliases(flow_identifier) + list_flow_aliases(flow_identifier, params::Dict{String,<:Any}) + +Returns a list of aliases for a flow. + +# Arguments +- `flow_identifier`: The unique identifier of the flow for which aliases are being returned. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return in the response. 
If the total + number of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. +- `"nextToken"`: If the total number of results is greater than the maxResults value + provided in the request, enter the token returned in the nextToken field in the response in + this field to return the next batch of results. +""" +function list_flow_aliases( + flowIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "GET", + "/flows/$(flowIdentifier)/aliases"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_flow_aliases( + flowIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "GET", + "/flows/$(flowIdentifier)/aliases", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_flow_versions(flow_identifier) + list_flow_versions(flow_identifier, params::Dict{String,<:Any}) + +Returns a list of information about each flow. For more information, see Deploy a flow in +Amazon Bedrock in the Amazon Bedrock User Guide. + +# Arguments +- `flow_identifier`: The unique identifier of the flow. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return in the response. If the total + number of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. +- `"nextToken"`: If the total number of results is greater than the maxResults value + provided in the request, enter the token returned in the nextToken field in the response in + this field to return the next batch of results. +""" +function list_flow_versions( + flowIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "GET", + "/flows/$(flowIdentifier)/versions"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_flow_versions( + flowIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "GET", + "/flows/$(flowIdentifier)/versions", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_flows() + list_flows(params::Dict{String,<:Any}) + +Returns a list of flows and information about each flow. For more information, see Manage a +flow in Amazon Bedrock in the Amazon Bedrock User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return in the response. If the total + number of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. +- `"nextToken"`: If the total number of results is greater than the maxResults value + provided in the request, enter the token returned in the nextToken field in the response in + this field to return the next batch of results. 
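A sketch of paginating ListFlows with maxResults and nextToken, assuming the `AWS.@service` bindings; the `flowSummaries` response field name is an assumption:

```julia
using AWS
@service Bedrock_Agent

flows = Any[]
params = Dict{String,Any}("maxResults" => 10)
while true
    resp = Bedrock_Agent.list_flows(params)
    append!(flows, get(resp, "flowSummaries", []))   # response field name assumed
    token = get(resp, "nextToken", nothing)
    token === nothing && break
    params["nextToken"] = token                      # request the next page
end
```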
+""" +function list_flows(; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "GET", "/flows/"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_flows( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "GET", "/flows/", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ list_ingestion_jobs(data_source_id, knowledge_base_id) list_ingestion_jobs(data_source_id, knowledge_base_id, params::Dict{String,<:Any}) @@ -1327,6 +2029,37 @@ function list_knowledge_bases( ) end +""" + list_prompts() + list_prompts(params::Dict{String,<:Any}) + +Returns a list of prompts from the Prompt management tool and information about each +prompt. For more information, see View information about prompts using Prompt management in +the Amazon Bedrock User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return in the response. If the total + number of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. +- `"nextToken"`: If the total number of results is greater than the maxResults value + provided in the request, enter the token returned in the nextToken field in the response in + this field to return the next batch of results. +- `"promptIdentifier"`: The unique identifier of the prompt. +""" +function list_prompts(; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "GET", "/prompts/"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_prompts( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "GET", "/prompts/", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) @@ -1391,6 +2124,39 @@ function prepare_agent( ) end +""" + prepare_flow(flow_identifier) + prepare_flow(flow_identifier, params::Dict{String,<:Any}) + +Prepares the DRAFT version of a flow so that it can be invoked. For more information, see +Test a flow in Amazon Bedrock in the Amazon Bedrock User Guide. + +# Arguments +- `flow_identifier`: The unique identifier of the flow. + +""" +function prepare_flow(flowIdentifier; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "POST", + "/flows/$(flowIdentifier)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function prepare_flow( + flowIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "POST", + "/flows/$(flowIdentifier)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ start_ingestion_job(data_source_id, knowledge_base_id) start_ingestion_job(data_source_id, knowledge_base_id, params::Dict{String,<:Any}) @@ -1539,6 +2305,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys expires and Amazon Bedrock deletes any data provided before the timeout. - `"instruction"`: Specifies new instructions that tell the agent what it should do and how it should interact with users. +- `"memoryConfiguration"`: Specifies the new memory configuration for the agent. 
- `"promptOverrideConfiguration"`: Contains configurations to override prompts in different parts of an agent sequence. For more information, see Advanced prompts. """ @@ -1756,20 +2523,21 @@ end update_data_source(data_source_configuration, data_source_id, knowledge_base_id, name) update_data_source(data_source_configuration, data_source_id, knowledge_base_id, name, params::Dict{String,<:Any}) -Updates configurations for a data source. You can't change the chunkingConfiguration after -you create the data source. Specify the existing chunkingConfiguration. +Updates the configurations for a data source connector. You can't change the +chunkingConfiguration after you create the data source connector. Specify the existing +chunkingConfiguration. # Arguments -- `data_source_configuration`: Contains details about the storage configuration of the data - source. +- `data_source_configuration`: The connection configuration for the data source that you + want to update. - `data_source_id`: The unique identifier of the data source. -- `knowledge_base_id`: The unique identifier of the knowledge base to which the data source - belongs. +- `knowledge_base_id`: The unique identifier of the knowledge base for the data source. - `name`: Specifies a new name for the data source. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"dataDeletionPolicy"`: The data deletion policy of the updated data source. +- `"dataDeletionPolicy"`: The data deletion policy for the data source that you want to + update. - `"description"`: Specifies a new description for the data source. - `"serverSideEncryptionConfiguration"`: Contains details about server-side encryption of the data source. @@ -1818,6 +2586,123 @@ function update_data_source( ) end +""" + update_flow(execution_role_arn, flow_identifier, name) + update_flow(execution_role_arn, flow_identifier, name, params::Dict{String,<:Any}) + +Modifies a flow. Include both fields that you want to keep and fields that you want to +change. For more information, see How it works and Create a flow in Amazon Bedrock in the +Amazon Bedrock User Guide. + +# Arguments +- `execution_role_arn`: The Amazon Resource Name (ARN) of the service role with permissions + to create and manage a flow. For more information, see Create a service role for flows in + Amazon Bedrock in the Amazon Bedrock User Guide. +- `flow_identifier`: The unique identifier of the flow. +- `name`: A name for the flow. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"customerEncryptionKeyArn"`: The Amazon Resource Name (ARN) of the KMS key to encrypt + the flow. +- `"definition"`: A definition of the nodes and the connections between the nodes in the + flow. +- `"description"`: A description for the flow. 
+""" +function update_flow( + executionRoleArn, + flowIdentifier, + name; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/flows/$(flowIdentifier)/", + Dict{String,Any}("executionRoleArn" => executionRoleArn, "name" => name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_flow( + executionRoleArn, + flowIdentifier, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/flows/$(flowIdentifier)/", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("executionRoleArn" => executionRoleArn, "name" => name), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_flow_alias(alias_identifier, flow_identifier, name, routing_configuration) + update_flow_alias(alias_identifier, flow_identifier, name, routing_configuration, params::Dict{String,<:Any}) + +Modifies the alias of a flow. Include both fields that you want to keep and ones that you +want to change. For more information, see Deploy a flow in Amazon Bedrock in the Amazon +Bedrock User Guide. + +# Arguments +- `alias_identifier`: The unique identifier of the alias. +- `flow_identifier`: The unique identifier of the flow. +- `name`: The name of the flow alias. +- `routing_configuration`: Contains information about the version to which to map the alias. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A description for the flow alias. +""" +function update_flow_alias( + aliasIdentifier, + flowIdentifier, + name, + routingConfiguration; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/flows/$(flowIdentifier)/aliases/$(aliasIdentifier)", + Dict{String,Any}("name" => name, "routingConfiguration" => routingConfiguration); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_flow_alias( + aliasIdentifier, + flowIdentifier, + name, + routingConfiguration, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/flows/$(flowIdentifier)/aliases/$(aliasIdentifier)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "name" => name, "routingConfiguration" => routingConfiguration + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_knowledge_base(knowledge_base_configuration, knowledge_base_id, name, role_arn, storage_configuration) update_knowledge_base(knowledge_base_configuration, knowledge_base_id, name, role_arn, storage_configuration, params::Dict{String,<:Any}) @@ -1893,3 +2778,50 @@ function update_knowledge_base( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_prompt(name, prompt_identifier) + update_prompt(name, prompt_identifier, params::Dict{String,<:Any}) + +Modifies a prompt in your prompt library. Include both fields that you want to keep and +fields that you want to replace. For more information, see Prompt management in Amazon +Bedrock and Edit prompts in your prompt library in the Amazon Bedrock User Guide. + +# Arguments +- `name`: A name for the prompt. +- `prompt_identifier`: The unique identifier of the prompt. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"customerEncryptionKeyArn"`: The Amazon Resource Name (ARN) of the KMS key to encrypt + the prompt. 
+- `"defaultVariant"`: The name of the default variant for the prompt. This value must match + the name field in the relevant PromptVariant object. +- `"description"`: A description for the prompt. +- `"variants"`: A list of objects, each containing details about a variant of the prompt. +""" +function update_prompt( + name, promptIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "PUT", + "/prompts/$(promptIdentifier)/", + Dict{String,Any}("name" => name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_prompt( + name, + promptIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/prompts/$(promptIdentifier)/", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("name" => name), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/bedrock_agent_runtime.jl b/src/services/bedrock_agent_runtime.jl index 1468f14ec2..ad3ae6d600 100644 --- a/src/services/bedrock_agent_runtime.jl +++ b/src/services/bedrock_agent_runtime.jl @@ -4,24 +4,123 @@ using AWS.AWSServices: bedrock_agent_runtime using AWS.Compat using AWS.UUIDs +""" + delete_agent_memory(agent_alias_id, agent_id) + delete_agent_memory(agent_alias_id, agent_id, params::Dict{String,<:Any}) + +Deletes memory from the specified memory identifier. + +# Arguments +- `agent_alias_id`: The unique identifier of an alias of an agent. +- `agent_id`: The unique identifier of the agent to which the alias belongs. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"memoryId"`: The unique identifier of the memory. +""" +function delete_agent_memory( + agentAliasId, agentId; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent_runtime( + "DELETE", + "/agents/$(agentId)/agentAliases/$(agentAliasId)/memories"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_agent_memory( + agentAliasId, + agentId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent_runtime( + "DELETE", + "/agents/$(agentId)/agentAliases/$(agentAliasId)/memories", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_agent_memory(agent_alias_id, agent_id, memory_id, memory_type) + get_agent_memory(agent_alias_id, agent_id, memory_id, memory_type, params::Dict{String,<:Any}) + +Gets the sessions stored in the memory of the agent. + +# Arguments +- `agent_alias_id`: The unique identifier of an alias of an agent. +- `agent_id`: The unique identifier of the agent to which the alias belongs. +- `memory_id`: The unique identifier of the memory. +- `memory_type`: The type of memory. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxItems"`: The maximum number of items to return in the response. If the total number + of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. +- `"nextToken"`: If the total number of results is greater than the maxItems value provided + in the request, enter the token returned in the nextToken field in the response in this + field to return the next batch of results. 
+""" +function get_agent_memory( + agentAliasId, + agentId, + memoryId, + memoryType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent_runtime( + "GET", + "/agents/$(agentId)/agentAliases/$(agentAliasId)/memories", + Dict{String,Any}("memoryId" => memoryId, "memoryType" => memoryType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_agent_memory( + agentAliasId, + agentId, + memoryId, + memoryType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent_runtime( + "GET", + "/agents/$(agentId)/agentAliases/$(agentAliasId)/memories", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("memoryId" => memoryId, "memoryType" => memoryType), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ invoke_agent(agent_alias_id, agent_id, session_id) invoke_agent(agent_alias_id, agent_id, session_id, params::Dict{String,<:Any}) - The CLI doesn't support InvokeAgent. Sends a prompt for the agent to process and respond -to. Note the following fields for the request: To continue the same conversation with an -agent, use the same sessionId value in the request. To activate trace enablement, turn -enableTrace to true. Trace enablement helps you follow the agent's reasoning process that -led it to the information it processed, the actions it took, and the final result it -yielded. For more information, see Trace enablement. End a conversation by setting -endSession to true. In the sessionState object, you can include attributes for the -session or prompt or, if you configured an action group to return control, results from -invocation of the action group. The response is returned in the bytes field of the chunk -object. The attribution object contains citations for parts of the response. If you set -enableTrace to true in the request, you can trace the agent's steps and reasoning process -that led it to the response. If the action predicted was configured to return control, -the response returns parameters for the action, elicited from the user, in the -returnControl field. Errors are also surfaced in the response. + The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeAgent. +Sends a prompt for the agent to process and respond to. Note the following fields for the +request: To continue the same conversation with an agent, use the same sessionId value in +the request. To activate trace enablement, turn enableTrace to true. Trace enablement +helps you follow the agent's reasoning process that led it to the information it processed, +the actions it took, and the final result it yielded. For more information, see Trace +enablement. End a conversation by setting endSession to true. In the sessionState +object, you can include attributes for the session or prompt or, if you configured an +action group to return control, results from invocation of the action group. The response +is returned in the bytes field of the chunk object. The attribution object contains +citations for parts of the response. If you set enableTrace to true in the request, you +can trace the agent's steps and reasoning process that led it to the response. If the +action predicted was configured to return control, the response returns parameters for the +action, elicited from the user, in the returnControl field. Errors are also surfaced in +the response. # Arguments - `agent_alias_id`: The alias of the agent to use. 
@@ -37,6 +136,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"inputText"`: The prompt text to send the agent. If you include returnControlInvocationResults in the sessionState field, the inputText field will be ignored. +- `"memoryId"`: The unique identifier of the agent memory. - `"sessionState"`: Contains parameters that specify various attributes of the session. For more information, see Control session context. If you include returnControlInvocationResults in the sessionState field, the inputText field will be @@ -68,6 +168,50 @@ function invoke_agent( ) end +""" + invoke_flow(flow_alias_identifier, flow_identifier, inputs) + invoke_flow(flow_alias_identifier, flow_identifier, inputs, params::Dict{String,<:Any}) + +Invokes an alias of a flow to run the inputs that you specify and return the output of each +node as a stream. If there's an error, the error is returned. For more information, see +Test a flow in Amazon Bedrock in the Amazon Bedrock User Guide. + +# Arguments +- `flow_alias_identifier`: The unique identifier of the flow alias. +- `flow_identifier`: The unique identifier of the flow. +- `inputs`: A list of objects, each containing information about an input into the flow. + +""" +function invoke_flow( + flowAliasIdentifier, + flowIdentifier, + inputs; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent_runtime( + "POST", + "/flows/$(flowIdentifier)/aliases/$(flowAliasIdentifier)", + Dict{String,Any}("inputs" => inputs); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function invoke_flow( + flowAliasIdentifier, + flowIdentifier, + inputs, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent_runtime( + "POST", + "/flows/$(flowIdentifier)/aliases/$(flowAliasIdentifier)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("inputs" => inputs), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ retrieve(knowledge_base_id, retrieval_query) retrieve(knowledge_base_id, retrieval_query, params::Dict{String,<:Any}) @@ -129,8 +273,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"retrieveAndGenerateConfiguration"`: Contains configurations for the knowledge base query and retrieval process. For more information, see Query configurations. - `"sessionConfiguration"`: Contains details about the session with the knowledge base. -- `"sessionId"`: The unique identifier of the session. Reuse the same value to continue the - same session with the knowledge base. +- `"sessionId"`: The unique identifier of the session. When you first make a + RetrieveAndGenerate request, Amazon Bedrock automatically generates this value. You must + reuse this value for all subsequent requests in the same conversational session. This value + allows Amazon Bedrock to maintain context and knowledge from previous interactions. You + can't explicitly set the sessionId yourself. 
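A sketch of the session behaviour described above for `retrieve_and_generate`: the first call omits `sessionId`, and later calls reuse the value the service returns. It assumes the AWS.jl `@service` pattern, a Dict-like parsed response, and hypothetical knowledge base and model identifiers.

```julia
using AWS: @service
@service Bedrock_Agent_Runtime

rag_config = Dict{String,Any}(                      # hypothetical configuration
    "type" => "KNOWLEDGE_BASE",
    "knowledgeBaseConfiguration" => Dict{String,Any}(
        "knowledgeBaseId" => "KB12345678",
        "modelArn" => "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-v2",
    ),
)

# First turn: omit sessionId so the service generates one.
turn1 = Bedrock_Agent_Runtime.retrieve_and_generate(
    Dict{String,Any}("text" => "Summarise the onboarding guide."),
    Dict{String,Any}("retrieveAndGenerateConfiguration" => rag_config),
)

# Later turns: reuse the server-generated sessionId to keep context.
turn2 = Bedrock_Agent_Runtime.retrieve_and_generate(
    Dict{String,Any}("text" => "What about security training?"),
    Dict{String,Any}(
        "sessionId" => turn1["sessionId"],
        "retrieveAndGenerateConfiguration" => rag_config,
    ),
)
```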
""" function retrieve_and_generate(input; aws_config::AbstractAWSConfig=global_aws_config()) return bedrock_agent_runtime( diff --git a/src/services/bedrock_runtime.jl b/src/services/bedrock_runtime.jl index 3f4cc4d7c6..d6f147126c 100644 --- a/src/services/bedrock_runtime.jl +++ b/src/services/bedrock_runtime.jl @@ -4,6 +4,56 @@ using AWS.AWSServices: bedrock_runtime using AWS.Compat using AWS.UUIDs +""" + apply_guardrail(content, guardrail_identifier, guardrail_version, source) + apply_guardrail(content, guardrail_identifier, guardrail_version, source, params::Dict{String,<:Any}) + +The action to apply a guardrail. + +# Arguments +- `content`: The content details used in the request to apply the guardrail. +- `guardrail_identifier`: The guardrail identifier used in the request to apply the + guardrail. +- `guardrail_version`: The guardrail version used in the request to apply the guardrail. +- `source`: The source of data used in the request to apply the guardrail. + +""" +function apply_guardrail( + content, + guardrailIdentifier, + guardrailVersion, + source; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_runtime( + "POST", + "/guardrail/$(guardrailIdentifier)/version/$(guardrailVersion)/apply", + Dict{String,Any}("content" => content, "source" => source); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function apply_guardrail( + content, + guardrailIdentifier, + guardrailVersion, + source, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_runtime( + "POST", + "/guardrail/$(guardrailIdentifier)/version/$(guardrailVersion)/apply", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("content" => content, "source" => source), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ converse(messages, model_id) converse(messages, model_id, params::Dict{String,<:Any}) diff --git a/src/services/chime_sdk_media_pipelines.jl b/src/services/chime_sdk_media_pipelines.jl index 3c9ec4e109..39777491d0 100644 --- a/src/services/chime_sdk_media_pipelines.jl +++ b/src/services/chime_sdk_media_pipelines.jl @@ -320,16 +320,26 @@ end create_media_pipeline_kinesis_video_stream_pool(pool_name, stream_configuration) create_media_pipeline_kinesis_video_stream_pool(pool_name, stream_configuration, params::Dict{String,<:Any}) -Creates an Kinesis video stream pool for the media pipeline. +Creates an Amazon Kinesis Video Stream pool for use with media stream pipelines. If a +meeting uses an opt-in Region as its MediaRegion, the KVS stream must be in that same +Region. For example, if a meeting uses the af-south-1 Region, the KVS stream must also be +in af-south-1. However, if the meeting uses a Region that AWS turns on by default, the KVS +stream can be in any available Region, including an opt-in Region. For example, if the +meeting uses ca-central-1, the KVS stream can be in eu-west-2, us-east-1, af-south-1, or +any other Region that the Amazon Chime SDK supports. To learn which AWS Region a meeting +uses, call the GetMeeting API and use the MediaRegion parameter from the response. For more +information about opt-in Regions, refer to Available Regions in the Amazon Chime SDK +Developer Guide, and Specify which AWS Regions your account can use, in the AWS Account +Management Reference Guide. # Arguments -- `pool_name`: The name of the video stream pool. -- `stream_configuration`: The configuration settings for the video stream. +- `pool_name`: The name of the pool. 
+- `stream_configuration`: The configuration settings for the stream. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientRequestToken"`: The token assigned to the client making the request. -- `"Tags"`: The tags assigned to the video stream pool. +- `"Tags"`: The tags assigned to the stream pool. """ function create_media_pipeline_kinesis_video_stream_pool( PoolName, StreamConfiguration; aws_config::AbstractAWSConfig=global_aws_config() @@ -531,10 +541,11 @@ end delete_media_pipeline_kinesis_video_stream_pool(identifier) delete_media_pipeline_kinesis_video_stream_pool(identifier, params::Dict{String,<:Any}) -Deletes an Kinesis video stream pool. +Deletes an Amazon Kinesis Video Stream pool. # Arguments -- `identifier`: The ID of the pool being deleted. +- `identifier`: The unique identifier of the requested resource. Valid values include the + name and ARN of the media pipeline Kinesis Video Stream pool. """ function delete_media_pipeline_kinesis_video_stream_pool( @@ -671,7 +682,8 @@ end Gets an Kinesis video stream pool. # Arguments -- `identifier`: The ID of the video stream pool. +- `identifier`: The unique identifier of the requested resource. Valid values include the + name and ARN of the media pipeline Kinesis Video Stream pool. """ function get_media_pipeline_kinesis_video_stream_pool( @@ -1314,10 +1326,11 @@ end update_media_pipeline_kinesis_video_stream_pool(identifier) update_media_pipeline_kinesis_video_stream_pool(identifier, params::Dict{String,<:Any}) -Updates an Kinesis video stream pool in a media pipeline. +Updates an Amazon Kinesis Video Stream pool in a media pipeline. # Arguments -- `identifier`: The ID of the video stream pool. +- `identifier`: The unique identifier of the requested resource. Valid values include the + name and ARN of the media pipeline Kinesis Video Stream pool. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: diff --git a/src/services/cleanrooms.jl b/src/services/cleanrooms.jl index 0d2c9cc8f5..175829ac3d 100644 --- a/src/services/cleanrooms.jl +++ b/src/services/cleanrooms.jl @@ -440,7 +440,8 @@ Creates a new analysis rule for a configured table. Currently, only one analysis be created for a given configured table. # Arguments -- `analysis_rule_policy`: The entire created configured table analysis rule object. +- `analysis_rule_policy`: The analysis rule policy that was created for the configured + table. - `analysis_rule_type`: The type of analysis rule. - `configured_table_identifier`: The identifier for the configured table to create the analysis rule for. Currently accepts the configured table ID. @@ -560,6 +561,187 @@ function create_configured_table_association( ) end +""" + create_configured_table_association_analysis_rule(analysis_rule_policy, analysis_rule_type, configured_table_association_identifier, membership_identifier) + create_configured_table_association_analysis_rule(analysis_rule_policy, analysis_rule_type, configured_table_association_identifier, membership_identifier, params::Dict{String,<:Any}) + + Creates a new analysis rule for an associated configured table. + +# Arguments +- `analysis_rule_policy`: The analysis rule policy that was created for the configured + table association. +- `analysis_rule_type`: The type of analysis rule. +- `configured_table_association_identifier`: The unique ID for the configured table + association. Currently accepts the configured table association ID. 
+- `membership_identifier`: A unique identifier for the membership that the configured + table association belongs to. Currently accepts the membership ID. + +""" +function create_configured_table_association_analysis_rule( + analysisRulePolicy, + analysisRuleType, + configuredTableAssociationIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)/analysisRule", + Dict{String,Any}( + "analysisRulePolicy" => analysisRulePolicy, + "analysisRuleType" => analysisRuleType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_configured_table_association_analysis_rule( + analysisRulePolicy, + analysisRuleType, + configuredTableAssociationIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)/analysisRule", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "analysisRulePolicy" => analysisRulePolicy, + "analysisRuleType" => analysisRuleType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_id_mapping_table(input_reference_config, membership_identifier, name) + create_id_mapping_table(input_reference_config, membership_identifier, name, params::Dict{String,<:Any}) + +Creates an ID mapping table. + +# Arguments +- `input_reference_config`: The input reference configuration needed to create the ID + mapping table. +- `membership_identifier`: The unique identifier of the membership that contains the ID + mapping table. +- `name`: A name for the ID mapping table. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A description of the ID mapping table. +- `"kmsKeyArn"`: The Amazon Resource Name (ARN) of the Amazon Web Services KMS key. This + value is used to encrypt the mapping table data that is stored by Clean Rooms. +- `"tags"`: An optional label that you can assign to a resource when you create it. Each + tag consists of a key and an optional value, both of which you define. When you use + tagging, you can also use tag-based access control in IAM policies to control access to + this resource. 
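A hedged sketch of `create_id_mapping_table`, assuming the AWS.jl `@service` pattern and hypothetical identifiers; the field names used in the input reference configuration are assumptions and should be checked against the current Clean Rooms API model.

```julia
using AWS: @service
@service Cleanrooms

input_ref = Dict{String,Any}(                       # assumed field names
    "inputReferenceArn" => "arn:aws:entityresolution:us-east-1:123456789012:idmappingworkflow/example",
    "manageResourcePolicies" => true,
)

table = Cleanrooms.create_id_mapping_table(
    input_ref,
    "membership-1234",                              # hypothetical membership ID
    "my-id-mapping-table",
    Dict{String,Any}("description" => "Maps first-party IDs to partner IDs"),
)
```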
+""" +function create_id_mapping_table( + inputReferenceConfig, + membershipIdentifier, + name; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/idmappingtables", + Dict{String,Any}("inputReferenceConfig" => inputReferenceConfig, "name" => name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_id_mapping_table( + inputReferenceConfig, + membershipIdentifier, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/idmappingtables", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "inputReferenceConfig" => inputReferenceConfig, "name" => name + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_id_namespace_association(input_reference_config, membership_identifier, name) + create_id_namespace_association(input_reference_config, membership_identifier, name, params::Dict{String,<:Any}) + +Creates an ID namespace association. + +# Arguments +- `input_reference_config`: The input reference configuration needed to create the ID + namespace association. +- `membership_identifier`: The unique identifier of the membership that contains the ID + namespace association. +- `name`: The name for the ID namespace association. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the ID namespace association. +- `"idMappingConfig"`: The configuration settings for the ID mapping table. +- `"tags"`: An optional label that you can assign to a resource when you create it. Each + tag consists of a key and an optional value, both of which you define. When you use + tagging, you can also use tag-based access control in IAM policies to control access to + this resource. +""" +function create_id_namespace_association( + inputReferenceConfig, + membershipIdentifier, + name; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/idnamespaceassociations", + Dict{String,Any}("inputReferenceConfig" => inputReferenceConfig, "name" => name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_id_namespace_association( + inputReferenceConfig, + membershipIdentifier, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/idnamespaceassociations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "inputReferenceConfig" => inputReferenceConfig, "name" => name + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_membership(collaboration_identifier, query_log_status) create_membership(collaboration_identifier, query_log_status, params::Dict{String,<:Any}) @@ -924,6 +1106,129 @@ function delete_configured_table_association( ) end +""" + delete_configured_table_association_analysis_rule(analysis_rule_type, configured_table_association_identifier, membership_identifier) + delete_configured_table_association_analysis_rule(analysis_rule_type, configured_table_association_identifier, membership_identifier, params::Dict{String,<:Any}) + +Deletes an analysis rule for a configured table association. 
+ +# Arguments +- `analysis_rule_type`: The type of the analysis rule that you want to delete. +- `configured_table_association_identifier`: The identifier for the configured table + association that's related to the analysis rule that you want to delete. +- `membership_identifier`: A unique identifier for the membership that the configured + table association belongs to. Currently accepts the membership ID. + +""" +function delete_configured_table_association_analysis_rule( + analysisRuleType, + configuredTableAssociationIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "DELETE", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)/analysisRule/$(analysisRuleType)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_configured_table_association_analysis_rule( + analysisRuleType, + configuredTableAssociationIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "DELETE", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)/analysisRule/$(analysisRuleType)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_id_mapping_table(id_mapping_table_identifier, membership_identifier) + delete_id_mapping_table(id_mapping_table_identifier, membership_identifier, params::Dict{String,<:Any}) + +Deletes an ID mapping table. + +# Arguments +- `id_mapping_table_identifier`: The unique identifier of the ID mapping table that you + want to delete. +- `membership_identifier`: The unique identifier of the membership that contains the ID + mapping table that you want to delete. + +""" +function delete_id_mapping_table( + idMappingTableIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "DELETE", + "/memberships/$(membershipIdentifier)/idmappingtables/$(idMappingTableIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_id_mapping_table( + idMappingTableIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "DELETE", + "/memberships/$(membershipIdentifier)/idmappingtables/$(idMappingTableIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_id_namespace_association(id_namespace_association_identifier, membership_identifier) + delete_id_namespace_association(id_namespace_association_identifier, membership_identifier, params::Dict{String,<:Any}) + +Deletes an ID namespace association. + +# Arguments +- `id_namespace_association_identifier`: The unique identifier of the ID namespace + association that you want to delete. +- `membership_identifier`: The unique identifier of the membership that contains the ID + namespace association that you want to delete. 
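For completeness, a minimal sketch of the corresponding delete calls, assuming the AWS.jl `@service` pattern and hypothetical identifiers.

```julia
using AWS: @service
@service Cleanrooms

membership_id = "membership-1234"                   # hypothetical

# Identifier first, then the membership that owns the resource.
Cleanrooms.delete_id_mapping_table("idmappingtable-5678", membership_id)
Cleanrooms.delete_id_namespace_association("idnamespaceassoc-9012", membership_id)
```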
+ +""" +function delete_id_namespace_association( + idNamespaceAssociationIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "DELETE", + "/memberships/$(membershipIdentifier)/idnamespaceassociations/$(idNamespaceAssociationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_id_namespace_association( + idNamespaceAssociationIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "DELETE", + "/memberships/$(membershipIdentifier)/idnamespaceassociations/$(idNamespaceAssociationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_member(account_id, collaboration_identifier) delete_member(account_id, collaboration_identifier, params::Dict{String,<:Any}) @@ -1189,6 +1494,46 @@ function get_collaboration_configured_audience_model_association( ) end +""" + get_collaboration_id_namespace_association(collaboration_identifier, id_namespace_association_identifier) + get_collaboration_id_namespace_association(collaboration_identifier, id_namespace_association_identifier, params::Dict{String,<:Any}) + +Retrieves an ID namespace association from a specific collaboration. + +# Arguments +- `collaboration_identifier`: The unique identifier of the collaboration that contains the + ID namespace association that you want to retrieve. +- `id_namespace_association_identifier`: The unique identifier of the ID namespace + association that you want to retrieve. + +""" +function get_collaboration_id_namespace_association( + collaborationIdentifier, + idNamespaceAssociationIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/idnamespaceassociations/$(idNamespaceAssociationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_collaboration_id_namespace_association( + collaborationIdentifier, + idNamespaceAssociationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/idnamespaceassociations/$(idNamespaceAssociationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_collaboration_privacy_budget_template(collaboration_identifier, privacy_budget_template_identifier) get_collaboration_privacy_budget_template(collaboration_identifier, privacy_budget_template_identifier, params::Dict{String,<:Any}) @@ -1283,19 +1628,142 @@ function get_configured_table( ) return cleanrooms( "GET", - "/configuredTables/$(configuredTableIdentifier)"; + "/configuredTables/$(configuredTableIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_configured_table( + configuredTableIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/configuredTables/$(configuredTableIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_configured_table_analysis_rule(analysis_rule_type, configured_table_identifier) + get_configured_table_analysis_rule(analysis_rule_type, configured_table_identifier, params::Dict{String,<:Any}) + +Retrieves a configured table analysis rule. 
+ +# Arguments +- `analysis_rule_type`: The analysis rule to be retrieved. Configured table analysis rules + are uniquely identified by their configured table identifier and analysis rule type. +- `configured_table_identifier`: The unique identifier for the configured table to + retrieve. Currently accepts the configured table ID. + +""" +function get_configured_table_analysis_rule( + analysisRuleType, + configuredTableIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/configuredTables/$(configuredTableIdentifier)/analysisRule/$(analysisRuleType)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_configured_table_analysis_rule( + analysisRuleType, + configuredTableIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/configuredTables/$(configuredTableIdentifier)/analysisRule/$(analysisRuleType)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_configured_table_association(configured_table_association_identifier, membership_identifier) + get_configured_table_association(configured_table_association_identifier, membership_identifier, params::Dict{String,<:Any}) + +Retrieves a configured table association. + +# Arguments +- `configured_table_association_identifier`: The unique ID for the configured table + association to retrieve. Currently accepts the configured table ID. +- `membership_identifier`: A unique identifier for the membership that the configured table + association belongs to. Currently accepts the membership ID. + +""" +function get_configured_table_association( + configuredTableAssociationIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_configured_table_association( + configuredTableAssociationIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_configured_table_association_analysis_rule(analysis_rule_type, configured_table_association_identifier, membership_identifier) + get_configured_table_association_analysis_rule(analysis_rule_type, configured_table_association_identifier, membership_identifier, params::Dict{String,<:Any}) + + Retrieves the analysis rule for a configured table association. + +# Arguments +- `analysis_rule_type`: The type of analysis rule that you want to retrieve. +- `configured_table_association_identifier`: The identifier for the configured table + association that's related to the analysis rule. +- `membership_identifier`: A unique identifier for the membership that the configured + table association belongs to. Currently accepts the membership ID. 
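A sketch of retrieving the association-level analysis rule described above, assuming the AWS.jl `@service` pattern; the rule type values are those used elsewhere in Clean Rooms (AGGREGATION, LIST, CUSTOM) and the identifiers are hypothetical.

```julia
using AWS: @service
@service Cleanrooms

rule = Cleanrooms.get_configured_table_association_analysis_rule(
    "CUSTOM",                      # analysis rule type
    "configuredtableassoc-1234",   # hypothetical configured table association ID
    "membership-5678",             # hypothetical membership ID
)
```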
+ +""" +function get_configured_table_association_analysis_rule( + analysisRuleType, + configuredTableAssociationIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)/analysisRule/$(analysisRuleType)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_configured_table( - configuredTableIdentifier, +function get_configured_table_association_analysis_rule( + analysisRuleType, + configuredTableAssociationIdentifier, + membershipIdentifier, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return cleanrooms( "GET", - "/configuredTables/$(configuredTableIdentifier)", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)/analysisRule/$(analysisRuleType)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1303,39 +1771,39 @@ function get_configured_table( end """ - get_configured_table_analysis_rule(analysis_rule_type, configured_table_identifier) - get_configured_table_analysis_rule(analysis_rule_type, configured_table_identifier, params::Dict{String,<:Any}) + get_id_mapping_table(id_mapping_table_identifier, membership_identifier) + get_id_mapping_table(id_mapping_table_identifier, membership_identifier, params::Dict{String,<:Any}) -Retrieves a configured table analysis rule. +Retrieves an ID mapping table. # Arguments -- `analysis_rule_type`: The analysis rule to be retrieved. Configured table analysis rules - are uniquely identified by their configured table identifier and analysis rule type. -- `configured_table_identifier`: The unique identifier for the configured table to - retrieve. Currently accepts the configured table ID. +- `id_mapping_table_identifier`: The unique identifier of the ID mapping table identifier + that you want to retrieve. +- `membership_identifier`: The unique identifier of the membership that contains the ID + mapping table that you want to retrieve. 
""" -function get_configured_table_analysis_rule( - analysisRuleType, - configuredTableIdentifier; +function get_id_mapping_table( + idMappingTableIdentifier, + membershipIdentifier; aws_config::AbstractAWSConfig=global_aws_config(), ) return cleanrooms( "GET", - "/configuredTables/$(configuredTableIdentifier)/analysisRule/$(analysisRuleType)"; + "/memberships/$(membershipIdentifier)/idmappingtables/$(idMappingTableIdentifier)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_configured_table_analysis_rule( - analysisRuleType, - configuredTableIdentifier, +function get_id_mapping_table( + idMappingTableIdentifier, + membershipIdentifier, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return cleanrooms( "GET", - "/configuredTables/$(configuredTableIdentifier)/analysisRule/$(analysisRuleType)", + "/memberships/$(membershipIdentifier)/idmappingtables/$(idMappingTableIdentifier)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1343,39 +1811,39 @@ function get_configured_table_analysis_rule( end """ - get_configured_table_association(configured_table_association_identifier, membership_identifier) - get_configured_table_association(configured_table_association_identifier, membership_identifier, params::Dict{String,<:Any}) + get_id_namespace_association(id_namespace_association_identifier, membership_identifier) + get_id_namespace_association(id_namespace_association_identifier, membership_identifier, params::Dict{String,<:Any}) -Retrieves a configured table association. +Retrieves an ID namespace association. # Arguments -- `configured_table_association_identifier`: The unique ID for the configured table - association to retrieve. Currently accepts the configured table ID. -- `membership_identifier`: A unique identifier for the membership that the configured table - association belongs to. Currently accepts the membership ID. +- `id_namespace_association_identifier`: The unique identifier of the ID namespace + association that you want to retrieve. +- `membership_identifier`: The unique identifier of the membership that contains the ID + namespace association that you want to retrieve. 
""" -function get_configured_table_association( - configuredTableAssociationIdentifier, +function get_id_namespace_association( + idNamespaceAssociationIdentifier, membershipIdentifier; aws_config::AbstractAWSConfig=global_aws_config(), ) return cleanrooms( "GET", - "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)"; + "/memberships/$(membershipIdentifier)/idnamespaceassociations/$(idNamespaceAssociationIdentifier)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_configured_table_association( - configuredTableAssociationIdentifier, +function get_id_namespace_association( + idNamespaceAssociationIdentifier, membershipIdentifier, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return cleanrooms( "GET", - "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)", + "/memberships/$(membershipIdentifier)/idnamespaceassociations/$(idNamespaceAssociationIdentifier)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1691,6 +2159,47 @@ function list_collaboration_configured_audience_model_associations( ) end +""" + list_collaboration_id_namespace_associations(collaboration_identifier) + list_collaboration_id_namespace_associations(collaboration_identifier, params::Dict{String,<:Any}) + +Returns a list of the ID namespace associations in a collaboration. + +# Arguments +- `collaboration_identifier`: The unique identifier of the collaboration that contains the + ID namespace associations that you want to retrieve. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum size of the results that is returned per call. Service + chooses a default if it has not been set. Service may return a nextToken even if the + maximum results has not been met.> +- `"nextToken"`: The pagination token that's used to fetch the next set of results. +""" +function list_collaboration_id_namespace_associations( + collaborationIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/idnamespaceassociations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_collaboration_id_namespace_associations( + collaborationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/idnamespaceassociations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_collaboration_privacy_budget_templates(collaboration_identifier) list_collaboration_privacy_budget_templates(collaboration_identifier, params::Dict{String,<:Any}) @@ -1927,6 +2436,88 @@ function list_configured_tables( ) end +""" + list_id_mapping_tables(membership_identifier) + list_id_mapping_tables(membership_identifier, params::Dict{String,<:Any}) + +Returns a list of ID mapping tables. + +# Arguments +- `membership_identifier`: The unique identifier of the membership that contains the ID + mapping tables that you want to view. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum size of the results that is returned per call. Service + chooses a default if it has not been set. Service may return a nextToken even if the + maximum results has not been met. 
+- `"nextToken"`: The pagination token that's used to fetch the next set of results. +""" +function list_id_mapping_tables( + membershipIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/idmappingtables"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_id_mapping_tables( + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/idmappingtables", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_id_namespace_associations(membership_identifier) + list_id_namespace_associations(membership_identifier, params::Dict{String,<:Any}) + +Returns a list of ID namespace associations. + +# Arguments +- `membership_identifier`: The unique identifier of the membership that contains the ID + namespace association that you want to view. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum size of the results that is returned per call. Service + chooses a default if it has not been set. Service may return a nextToken even if the + maximum results has not been met. +- `"nextToken"`: The pagination token that's used to fetch the next set of results. +""" +function list_id_namespace_associations( + membershipIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/idnamespaceassociations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_id_namespace_associations( + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/idnamespaceassociations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_members(collaboration_identifier) list_members(collaboration_identifier, params::Dict{String,<:Any}) @@ -2211,6 +2802,46 @@ function list_tags_for_resource( ) end +""" + populate_id_mapping_table(id_mapping_table_identifier, membership_identifier) + populate_id_mapping_table(id_mapping_table_identifier, membership_identifier, params::Dict{String,<:Any}) + +Defines the information that's necessary to populate an ID mapping table. + +# Arguments +- `id_mapping_table_identifier`: The unique identifier of the ID mapping table that you + want to populate. +- `membership_identifier`: The unique identifier of the membership that contains the ID + mapping table that you want to populate. 
+ +""" +function populate_id_mapping_table( + idMappingTableIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/idmappingtables/$(idMappingTableIdentifier)/populate"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function populate_id_mapping_table( + idMappingTableIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/idmappingtables/$(idMappingTableIdentifier)/populate", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ preview_privacy_impact(membership_identifier, parameters) preview_privacy_impact(membership_identifier, parameters, params::Dict{String,<:Any}) @@ -2635,6 +3266,147 @@ function update_configured_table_association( ) end +""" + update_configured_table_association_analysis_rule(analysis_rule_policy, analysis_rule_type, configured_table_association_identifier, membership_identifier) + update_configured_table_association_analysis_rule(analysis_rule_policy, analysis_rule_type, configured_table_association_identifier, membership_identifier, params::Dict{String,<:Any}) + + Updates the analysis rule for a configured table association. + +# Arguments +- `analysis_rule_policy`: The updated analysis rule policy for the configured table + association. +- `analysis_rule_type`: The analysis rule type that you want to update. +- `configured_table_association_identifier`: The identifier for the configured table + association to update. +- `membership_identifier`: A unique identifier for the membership that the configured + table association belongs to. Currently accepts the membership ID. + +""" +function update_configured_table_association_analysis_rule( + analysisRulePolicy, + analysisRuleType, + configuredTableAssociationIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "PATCH", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)/analysisRule/$(analysisRuleType)", + Dict{String,Any}("analysisRulePolicy" => analysisRulePolicy); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_configured_table_association_analysis_rule( + analysisRulePolicy, + analysisRuleType, + configuredTableAssociationIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "PATCH", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)/analysisRule/$(analysisRuleType)", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("analysisRulePolicy" => analysisRulePolicy), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_id_mapping_table(id_mapping_table_identifier, membership_identifier) + update_id_mapping_table(id_mapping_table_identifier, membership_identifier, params::Dict{String,<:Any}) + +Provides the details that are necessary to update an ID mapping table. + +# Arguments +- `id_mapping_table_identifier`: The unique identifier of the ID mapping table that you + want to update. +- `membership_identifier`: The unique identifier of the membership that contains the ID + mapping table that you want to update. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A new description for the ID mapping table. +- `"kmsKeyArn"`: The Amazon Resource Name (ARN) of the Amazon Web Services KMS key. +""" +function update_id_mapping_table( + idMappingTableIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "PATCH", + "/memberships/$(membershipIdentifier)/idmappingtables/$(idMappingTableIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_id_mapping_table( + idMappingTableIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "PATCH", + "/memberships/$(membershipIdentifier)/idmappingtables/$(idMappingTableIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_id_namespace_association(id_namespace_association_identifier, membership_identifier) + update_id_namespace_association(id_namespace_association_identifier, membership_identifier, params::Dict{String,<:Any}) + +Provides the details that are necessary to update an ID namespace association. + +# Arguments +- `id_namespace_association_identifier`: The unique identifier of the ID namespace + association that you want to update. +- `membership_identifier`: The unique identifier of the membership that contains the ID + namespace association that you want to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A new description for the ID namespace association. +- `"idMappingConfig"`: The configuration settings for the ID mapping table. +- `"name"`: A new name for the ID namespace association. +""" +function update_id_namespace_association( + idNamespaceAssociationIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "PATCH", + "/memberships/$(membershipIdentifier)/idnamespaceassociations/$(idNamespaceAssociationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_id_namespace_association( + idNamespaceAssociationIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "PATCH", + "/memberships/$(membershipIdentifier)/idnamespaceassociations/$(idNamespaceAssociationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_membership(membership_identifier) update_membership(membership_identifier, params::Dict{String,<:Any}) diff --git a/src/services/cloudfront.jl b/src/services/cloudfront.jl index 21bf5b85ff..1ce2a1b870 100644 --- a/src/services/cloudfront.jl +++ b/src/services/cloudfront.jl @@ -3692,7 +3692,8 @@ end list_tags_for_resource2020_05_31(resource) list_tags_for_resource2020_05_31(resource, params::Dict{String,<:Any}) -List tags for a CloudFront resource. +List tags for a CloudFront resource. For more information, see Tagging a distribution in +the Amazon CloudFront Developer Guide. # Arguments - `resource`: An ARN of a CloudFront resource. @@ -3778,7 +3779,8 @@ end tag_resource2020_05_31(resource, tags) tag_resource2020_05_31(resource, tags, params::Dict{String,<:Any}) -Add tags to a CloudFront resource. +Add tags to a CloudFront resource. 
For more information, see Tagging a distribution in the +Amazon CloudFront Developer Guide. # Arguments - `resource`: An ARN of a CloudFront resource. @@ -3884,7 +3886,8 @@ end untag_resource2020_05_31(resource, tag_keys) untag_resource2020_05_31(resource, tag_keys, params::Dict{String,<:Any}) -Remove tags from a CloudFront resource. +Remove tags from a CloudFront resource. For more information, see Tagging a distribution in +the Amazon CloudFront Developer Guide. # Arguments - `resource`: An ARN of a CloudFront resource. diff --git a/src/services/cloudhsm_v2.jl b/src/services/cloudhsm_v2.jl index 55326e9439..6e20bec62a 100644 --- a/src/services/cloudhsm_v2.jl +++ b/src/services/cloudhsm_v2.jl @@ -8,7 +8,8 @@ using AWS.UUIDs copy_backup_to_region(backup_id, destination_region) copy_backup_to_region(backup_id, destination_region, params::Dict{String,<:Any}) -Copy an AWS CloudHSM cluster backup to a different region. +Copy an CloudHSM cluster backup to a different region. Cross-account use: No. You cannot +perform this operation on an CloudHSM backup in a different Amazon Web Services account. # Arguments - `backup_id`: The ID of the backup that will be copied to the destination region. @@ -57,7 +58,9 @@ end create_cluster(hsm_type, subnet_ids) create_cluster(hsm_type, subnet_ids, params::Dict{String,<:Any}) -Creates a new AWS CloudHSM cluster. +Creates a new CloudHSM cluster. Cross-account use: Yes. To perform this operation with an +CloudHSM backup in a different AWS account, specify the full backup ARN in the value of the +SourceBackupId parameter. # Arguments - `hsm_type`: The type of HSM to use in the cluster. The allowed values are hsm1.medium and @@ -71,9 +74,10 @@ Creates a new AWS CloudHSM cluster. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"BackupRetentionPolicy"`: A policy that defines how the service retains backups. - `"Mode"`: The mode to use in the cluster. The allowed values are FIPS and NON_FIPS. -- `"SourceBackupId"`: The identifier (ID) of the cluster backup to restore. Use this value - to restore the cluster from a backup instead of creating a new cluster. To find the backup - ID, use DescribeBackups. +- `"SourceBackupId"`: The identifier (ID) or the Amazon Resource Name (ARN) of the cluster + backup to restore. Use this value to restore the cluster from a backup instead of creating + a new cluster. To find the backup ID or ARN, use DescribeBackups. If using a backup in + another account, the full ARN must be supplied. - `"TagList"`: Tags to apply to the CloudHSM cluster during creation. """ function create_cluster( @@ -110,7 +114,9 @@ end create_hsm(availability_zone, cluster_id) create_hsm(availability_zone, cluster_id, params::Dict{String,<:Any}) -Creates a new hardware security module (HSM) in the specified AWS CloudHSM cluster. +Creates a new hardware security module (HSM) in the specified CloudHSM cluster. +Cross-account use: No. You cannot perform this operation on an CloudHSM cluster in a +different Amazon Web Service account. # Arguments - `availability_zone`: The Availability Zone where you are creating the HSM. To find the @@ -160,8 +166,10 @@ end delete_backup(backup_id) delete_backup(backup_id, params::Dict{String,<:Any}) -Deletes a specified AWS CloudHSM backup. A backup can be restored up to 7 days after the -DeleteBackup request is made. For more information on restoring a backup, see RestoreBackup. +Deletes a specified CloudHSM backup. 
A backup can be restored up to 7 days after the +DeleteBackup request is made. For more information on restoring a backup, see +RestoreBackup. Cross-account use: No. You cannot perform this operation on an CloudHSM +backup in a different Amazon Web Services account. # Arguments - `backup_id`: The ID of the backup to be deleted. To find the ID of a backup, use the @@ -195,9 +203,10 @@ end delete_cluster(cluster_id) delete_cluster(cluster_id, params::Dict{String,<:Any}) -Deletes the specified AWS CloudHSM cluster. Before you can delete a cluster, you must -delete all HSMs in the cluster. To see if the cluster contains any HSMs, use -DescribeClusters. To delete an HSM, use DeleteHsm. +Deletes the specified CloudHSM cluster. Before you can delete a cluster, you must delete +all HSMs in the cluster. To see if the cluster contains any HSMs, use DescribeClusters. To +delete an HSM, use DeleteHsm. Cross-account use: No. You cannot perform this operation on +an CloudHSM cluster in a different Amazon Web Services account. # Arguments - `cluster_id`: The identifier (ID) of the cluster that you are deleting. To find the @@ -234,6 +243,8 @@ end Deletes the specified HSM. To specify an HSM, you can use its identifier (ID), the IP address of the HSM's elastic network interface (ENI), or the ID of the HSM's ENI. You need to specify only one of these values. To find these values, use DescribeClusters. +Cross-account use: No. You cannot perform this operation on an CloudHSM hsm in a different +Amazon Web Services account. # Arguments - `cluster_id`: The identifier (ID) of the cluster that contains the HSM that you are @@ -270,15 +281,49 @@ function delete_hsm( ) end +""" + delete_resource_policy() + delete_resource_policy(params::Dict{String,<:Any}) + + Deletes an CloudHSM resource policy. Deleting a resource policy will result in the +resource being unshared and removed from any RAM resource shares. Deleting the resource +policy attached to a backup will not impact any clusters created from that backup. +Cross-account use: No. You cannot perform this operation on an CloudHSM resource in a +different Amazon Web Services account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ResourceArn"`: Amazon Resource Name (ARN) of the resource from which the policy will be + removed. +""" +function delete_resource_policy(; aws_config::AbstractAWSConfig=global_aws_config()) + return cloudhsm_v2( + "DeleteResourcePolicy"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function delete_resource_policy( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudhsm_v2( + "DeleteResourcePolicy", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_backups() describe_backups(params::Dict{String,<:Any}) -Gets information about backups of AWS CloudHSM clusters. This is a paginated operation, -which means that each response might contain only a subset of all the backups. When the -response contains only a subset of backups, it includes a NextToken value. Use this value -in a subsequent DescribeBackups request to get more backups. When you receive a response -with no NextToken (or an empty or null value), that means there are no more backups to get. +Gets information about backups of CloudHSM clusters. Lists either the backups you own or +the backups shared with you when the Shared parameter is true. 
This is a paginated +operation, which means that each response might contain only a subset of all the backups. +When the response contains only a subset of backups, it includes a NextToken value. Use +this value in a subsequent DescribeBackups request to get more backups. When you receive a +response with no NextToken (or an empty or null value), that means there are no more +backups to get. Cross-account use: Yes. Customers can describe backups in other Amazon Web +Services accounts that are shared with them. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -296,6 +341,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys more backups than the number you specify, the response contains a NextToken value. - `"NextToken"`: The NextToken value that you received in the previous response. Use this value to get more backups. +- `"Shared"`: Describe backups that are shared with you. By default when using this + option, the command returns backups that have been shared using a standard Resource Access + Manager resource share. In order for a backup that was shared using the PutResourcePolicy + command to be returned, the share must be promoted to a standard resource share using the + RAM PromoteResourceShareCreatedFromPolicy API operation. For more information about sharing + backups, see Working with shared backups in the CloudHSM User Guide. - `"SortAscending"`: Designates whether or not to sort the return backups by ascending chronological order of generation. """ @@ -316,11 +367,13 @@ end describe_clusters() describe_clusters(params::Dict{String,<:Any}) -Gets information about AWS CloudHSM clusters. This is a paginated operation, which means -that each response might contain only a subset of all the clusters. When the response -contains only a subset of clusters, it includes a NextToken value. Use this value in a -subsequent DescribeClusters request to get more clusters. When you receive a response with -no NextToken (or an empty or null value), that means there are no more clusters to get. +Gets information about CloudHSM clusters. This is a paginated operation, which means that +each response might contain only a subset of all the clusters. When the response contains +only a subset of clusters, it includes a NextToken value. Use this value in a subsequent +DescribeClusters request to get more clusters. When you receive a response with no +NextToken (or an empty or null value), that means there are no more clusters to get. +Cross-account use: No. You cannot perform this operation on CloudHSM clusters in a +different Amazon Web Services account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -347,14 +400,40 @@ function describe_clusters( ) end +""" + get_resource_policy() + get_resource_policy(params::Dict{String,<:Any}) + + Retrieves the resource policy document attached to a given resource. Cross-account use: +No. You cannot perform this operation on an CloudHSM resource in a different Amazon Web +Services account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ResourceArn"`: Amazon Resource Name (ARN) of the resource to which a policy is attached. 
+""" +function get_resource_policy(; aws_config::AbstractAWSConfig=global_aws_config()) + return cloudhsm_v2( + "GetResourcePolicy"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_resource_policy( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudhsm_v2( + "GetResourcePolicy", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ initialize_cluster(cluster_id, signed_cert, trust_anchor) initialize_cluster(cluster_id, signed_cert, trust_anchor, params::Dict{String,<:Any}) -Claims an AWS CloudHSM cluster by submitting the cluster certificate issued by your issuing +Claims an CloudHSM cluster by submitting the cluster certificate issued by your issuing certificate authority (CA) and the CA's root certificate. Before you can claim a cluster, you must sign the cluster's certificate signing request (CSR) with your issuing CA. To get -the cluster's CSR, use DescribeClusters. +the cluster's CSR, use DescribeClusters. Cross-account use: No. You cannot perform this +operation on an CloudHSM cluster in a different Amazon Web Services account. # Arguments - `cluster_id`: The identifier (ID) of the cluster that you are claiming. To find the @@ -412,11 +491,13 @@ end list_tags(resource_id) list_tags(resource_id, params::Dict{String,<:Any}) -Gets a list of tags for the specified AWS CloudHSM cluster. This is a paginated operation, +Gets a list of tags for the specified CloudHSM cluster. This is a paginated operation, which means that each response might contain only a subset of all the tags. When the response contains only a subset of tags, it includes a NextToken value. Use this value in a subsequent ListTags request to get more tags. When you receive a response with no NextToken -(or an empty or null value), that means there are no more tags to get. +(or an empty or null value), that means there are no more tags to get. Cross-account use: +No. You cannot perform this operation on an CloudHSM resource in a different Amazon Web +Services account. # Arguments - `resource_id`: The cluster identifier (ID) for the cluster whose tags you are getting. To @@ -456,7 +537,8 @@ end modify_backup_attributes(backup_id, never_expires) modify_backup_attributes(backup_id, never_expires, params::Dict{String,<:Any}) -Modifies attributes for AWS CloudHSM backup. +Modifies attributes for CloudHSM backup. Cross-account use: No. You cannot perform this +operation on an CloudHSM backup in a different Amazon Web Services account. # Arguments - `backup_id`: The identifier (ID) of the backup to modify. To find the ID of a backup, use @@ -500,7 +582,8 @@ end modify_cluster(backup_retention_policy, cluster_id) modify_cluster(backup_retention_policy, cluster_id, params::Dict{String,<:Any}) -Modifies AWS CloudHSM cluster. +Modifies CloudHSM cluster. Cross-account use: No. You cannot perform this operation on an +CloudHSM cluster in a different Amazon Web Services account. # Arguments - `backup_retention_policy`: A policy that defines how the service retains backups. @@ -543,12 +626,52 @@ function modify_cluster( ) end +""" + put_resource_policy() + put_resource_policy(params::Dict{String,<:Any}) + +Creates or updates an CloudHSM resource policy. A resource policy helps you to define the +IAM entity (for example, an Amazon Web Services account) that can manage your CloudHSM +resources. 
The following resources support CloudHSM resource policies: Backup - The +resource policy allows you to describe the backup and restore a cluster from the backup in +another Amazon Web Services account. In order to share a backup, it must be in a 'READY' +state and you must own it. While you can share a backup using the CloudHSM +PutResourcePolicy operation, we recommend using Resource Access Manager (RAM) instead. +Using RAM provides multiple benefits as it creates the policy for you, allows multiple +resources to be shared at one time, and increases the discoverability of shared resources. +If you use PutResourcePolicy and want consumers to be able to describe the backups you +share with them, you must promote the backup to a standard RAM Resource Share using the RAM +PromoteResourceShareCreatedFromPolicy API operation. For more information, see Working +with shared backups in the CloudHSM User Guide Cross-account use: No. You cannot perform +this operation on an CloudHSM resource in a different Amazon Web Services account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Policy"`: The policy you want to associate with a resource. For an example policy, see + Working with shared backups in the CloudHSM User Guide +- `"ResourceArn"`: Amazon Resource Name (ARN) of the resource to which you want to attach a + policy. +""" +function put_resource_policy(; aws_config::AbstractAWSConfig=global_aws_config()) + return cloudhsm_v2( + "PutResourcePolicy"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function put_resource_policy( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudhsm_v2( + "PutResourcePolicy", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ restore_backup(backup_id) restore_backup(backup_id, params::Dict{String,<:Any}) -Restores a specified AWS CloudHSM backup that is in the PENDING_DELETION state. For mor -information on deleting a backup, see DeleteBackup. +Restores a specified CloudHSM backup that is in the PENDING_DELETION state. For more +information on deleting a backup, see DeleteBackup. Cross-account use: No. You cannot +perform this operation on an CloudHSM backup in a different Amazon Web Services account. # Arguments - `backup_id`: The ID of the backup to be restored. To find the ID of a backup, use the @@ -582,7 +705,9 @@ end tag_resource(resource_id, tag_list) tag_resource(resource_id, tag_list, params::Dict{String,<:Any}) -Adds or overwrites one or more tags for the specified AWS CloudHSM cluster. +Adds or overwrites one or more tags for the specified CloudHSM cluster. Cross-account use: +No. You cannot perform this operation on an CloudHSM resource in a different Amazon Web +Services account. # Arguments - `resource_id`: The cluster identifier (ID) for the cluster that you are tagging. To find @@ -624,7 +749,9 @@ end untag_resource(resource_id, tag_key_list) untag_resource(resource_id, tag_key_list, params::Dict{String,<:Any}) -Removes the specified tag or tags from the specified AWS CloudHSM cluster. +Removes the specified tag or tags from the specified CloudHSM cluster. Cross-account use: +No. You cannot perform this operation on an CloudHSM resource in a different Amazon Web +Services account. # Arguments - `resource_id`: The cluster identifier (ID) for the cluster whose tags you are removing. 
diff --git a/src/services/connect.jl b/src/services/connect.jl index 23a305e19b..92967d277b 100644 --- a/src/services/connect.jl +++ b/src/services/connect.jl @@ -1048,7 +1048,7 @@ provided in the StartAttachedFileUpload API. # Arguments - `file_id`: The unique identifier of the attached file resource. -- `instance_id`: The unique identifier of the Connect instance. +- `instance_id`: The unique identifier of the Amazon Connect instance. - `associated_resource_arn`: The resource to which the attached file is (being) uploaded to. Cases are the only current supported resource. This value must be a valid ARN. @@ -3574,6 +3574,45 @@ function describe_agent_status( ) end +""" + describe_authentication_profile(authentication_profile_id, instance_id) + describe_authentication_profile(authentication_profile_id, instance_id, params::Dict{String,<:Any}) + +This API is in preview release for Amazon Connect and is subject to change. To request +access to this API, contact Amazon Web Services Support. Describes the target +authentication profile. + +# Arguments +- `authentication_profile_id`: A unique identifier for the authentication profile. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. + +""" +function describe_authentication_profile( + AuthenticationProfileId, InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "GET", + "/authentication-profiles/$(InstanceId)/$(AuthenticationProfileId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_authentication_profile( + AuthenticationProfileId, + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "GET", + "/authentication-profiles/$(InstanceId)/$(AuthenticationProfileId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_contact(contact_id, instance_id) describe_contact(contact_id, instance_id, params::Dict{String,<:Any}) @@ -5528,32 +5567,35 @@ definitions in the Amazon Connect Administrator Guide. interval for the retrieval of historical metrics data. The time must be later than the start time timestamp. It cannot be later than the current timestamp. - `filters`: The filters to apply to returned metrics. You can filter on the following - resources: Agents Channels Feature Queues Routing profiles Routing step - expression User hierarchy groups At least one filter must be passed from queues, - routing profiles, agents, or user hierarchy groups. To filter by phone number, see Create a - historical metrics report in the Amazon Connect Administrator Guide. Note the following - limits: Filter keys: A maximum of 5 filter keys are supported in a single request. Valid - filter keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | - AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | + resources: Agents Campaigns Channels Feature Queues Routing profiles Routing + step expression User hierarchy groups At least one filter must be passed from queues, + routing profiles, agents, or user hierarchy groups. For metrics for outbound campaigns + analytics, you can also use campaigns to satisfy at least one filter requirement. To filter + by phone number, see Create a historical metrics report in the Amazon Connect Administrator + Guide. Note the following limits: Filter keys: A maximum of 5 filter keys are supported + in a single request. 
Valid filter keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | + AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | + AGENT_HIERARCHY_LEVEL_FIVE | ANSWERING_MACHINE_DETECTION_STATUS | CAMPAIGN | CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL | contact/segmentAttributes/connect:Subtype | - FEATURE | FLOW_TYPE | FLOWS_NEXT_RESOURCE_ID | FLOWS_NEXT_RESOURCE_QUEUE_ID | - FLOWS_OUTCOME_TYPE | FLOWS_RESOURCE_ID | INITIATION_METHOD | RESOURCE_PUBLISHED_TIMESTAMP | - ROUTING_PROFILE | ROUTING_STEP_EXPRESSION | QUEUE | Q_CONNECT_ENABLED | Filter values: - A maximum of 100 filter values are supported in a single request. VOICE, CHAT, and TASK are - valid filterValue for the CHANNEL filter key. They do not count towards limitation of 100 - filter values. For example, a GetMetricDataV2 request can filter by 50 queues, 35 agents, - and 15 routing profiles for a total of 100 filter values, along with 3 channel filters. - contact_lens_conversational_analytics is a valid filterValue for the FEATURE filter key. It - is available only to contacts analyzed by Contact Lens conversational analytics. - connect:Chat, connect:SMS, connect:Telephony, and connect:WebRTC are valid filterValue - examples (not exhaustive) for the contact/segmentAttributes/connect:Subtype filter key. - ROUTING_STEP_EXPRESSION is a valid filter key with a filter value up to 3000 length. This - filter is case and order sensitive. JSON string fields must be sorted in ascending order - and JSON array order should be kept as is. Q_CONNECT_ENABLED. TRUE and FALSE are the only - valid filterValues for the Q_CONNECT_ENABLED filter key. TRUE includes all contacts that - had Amazon Q in Connect enabled as part of the flow. FALSE includes all contacts that did - not have Amazon Q in Connect enabled as part of the flow This filter is available only - for contact record-driven metrics. + DISCONNECT_REASON | FEATURE | FLOW_TYPE | FLOWS_NEXT_RESOURCE_ID | + FLOWS_NEXT_RESOURCE_QUEUE_ID | FLOWS_OUTCOME_TYPE | FLOWS_RESOURCE_ID | INITIATION_METHOD | + RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE | ROUTING_STEP_EXPRESSION | QUEUE | + Q_CONNECT_ENABLED | Filter values: A maximum of 100 filter values are supported in a + single request. VOICE, CHAT, and TASK are valid filterValue for the CHANNEL filter key. + They do not count towards limitation of 100 filter values. For example, a GetMetricDataV2 + request can filter by 50 queues, 35 agents, and 15 routing profiles for a total of 100 + filter values, along with 3 channel filters. contact_lens_conversational_analytics is a + valid filterValue for the FEATURE filter key. It is available only to contacts analyzed by + Contact Lens conversational analytics. connect:Chat, connect:SMS, connect:Telephony, and + connect:WebRTC are valid filterValue examples (not exhaustive) for the + contact/segmentAttributes/connect:Subtype filter key. ROUTING_STEP_EXPRESSION is a valid + filter key with a filter value up to 3000 length. This filter is case and order sensitive. + JSON string fields must be sorted in ascending order and JSON array order should be kept as + is. Q_CONNECT_ENABLED. TRUE and FALSE are the only valid filterValues for the + Q_CONNECT_ENABLED filter key. TRUE includes all contacts that had Amazon Q in Connect + enabled as part of the flow. FALSE includes all contacts that did not have Amazon Q in + Connect enabled as part of the flow This filter is available only for contact + record-driven metrics. Campaign ARNs are valid filterValues for the CAMPAIGN filter key. 
- `metrics`: The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator Guide. @@ -5603,10 +5645,13 @@ definitions in the Amazon Connect Administrator Guide. Feature is a valid filter but not a valid grouping. AVG_CONVERSATION_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: - Average conversation duration AVG_FLOW_TIME Unit: Seconds Valid groupings and filters: - Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, - Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource - ID, Initiation method, Resource published timestamp UI name: Average flow time + Average conversation duration AVG_DIALS_PER_MINUTE This metric is available only for + contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: + Campaign, Agent, Queue, Routing Profile UI name: Average dials per minute AVG_FLOW_TIME + Unit: Seconds Valid groupings and filters: Channel, + contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next + resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, + Initiation method, Resource published timestamp UI name: Average flow time AVG_GREETING_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in @@ -5655,26 +5700,25 @@ definitions in the Amazon Connect Administrator Guide. AVG_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in - Connect UI name: Average customer talk time CASES_CREATED Unit: Count Required filter - key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: - Cases created CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| - Incoming | Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings - and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, - contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: - Contact abandoned CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: - Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect - Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in - seconds. For Comparison, you must enter LT (for \"Less than\"). UI name: Contacts - abandoned in X seconds CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and filters: - Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect - Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in - seconds. For Comparison, you must enter LT (for \"Less than\"). 
UI name: Contacts answered - in X seconds CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD - Valid groupings and filters: Queue, Channel, Routing Profile, Feature, - contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts created Feature - is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: Count Valid metric - filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and filters: Queue, - Channel, Routing Profile, Agent, Agent Hierarchy, Feature, + Connect UI name: Average customer talk time AVG_WAIT_TIME_AFTER_CUSTOMER_CONNECTION This + metric is available only for contacts analyzed by outbound campaigns analytics. Unit: + Seconds Valid groupings and filters: Campaign UI name: Average wait time after customer + connection CAMPAIGN_CONTACTS_ABANDONED_AFTER_X This metric is available only for + contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: + Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 + (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: + Campaign contacts abandoned after X CAMPAIGN_CONTACTS_ABANDONED_AFTER_X_RATE This metric + is available only for contacts analyzed by outbound campaigns analytics. Unit: Percent + Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any + whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT + (for Greater than). UI name: Campaign contacts abandoned after X rate CASES_CREATED + Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: + CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases created CONTACTS_CREATED Unit: Count Valid + metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing + Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts + created Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: + Count Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and + filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: API contacts handled Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT Unit: Count Valid metric filter key: @@ -5698,53 +5742,72 @@ definitions in the Amazon Connect Administrator Guide. contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts queued CONTACTS_QUEUED_BY_ENQUEUE Unit: Count Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts queued - (enqueue timestamp) CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: - Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect - Threshold: For ThresholdValue enter any whole number from 1 to 604800 (inclusive), in - seconds. For Comparison, you must enter LT (for \"Less than\"). UI name: Contacts resolved - in X CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, - Routing Profile, Agent, Agent Hierarchy, Feature, + (enqueue timestamp) CONTACTS_REMOVED_FROM_QUEUE_IN_X Unit: Count Valid groupings and + filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter + any whole number from 1 to 604800 (inclusive), in seconds. 
For Comparison, you must enter + LT (for \"Less than\"). UI name: Contacts removed from queue in X seconds + CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing + Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For + ThresholdValue enter any whole number from 1 to 604800 (inclusive), in seconds. For + Comparison, you must enter LT (for \"Less than\"). UI name: Contacts resolved in X + CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, Routing + Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in + Connect UI name: Contacts transferred out Feature is a valid filter but not a valid + grouping. CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out - Feature is a valid filter but not a valid grouping. CONTACTS_TRANSFERRED_OUT_BY_AGENT - Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent - Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts - transferred out by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings - and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out queue CURRENT_CASES Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings - and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases FLOWS_OUTCOME Unit: - Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow - type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows - outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: - Flows outcome FLOWS_STARTED Unit: Count Valid groupings and filters: Channel, + and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases DELIVERY_ATTEMPTS + This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: + Count Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid + groupings and filters: Campaign, Agent, Queue, Routing Profile, Answering Machine Detection + Status, Disconnect Reason UI name: Delivery attempts DELIVERY_ATTEMPT_DISPOSITION_RATE + This metric is available only for contacts analyzed by outbound campaigns analytics, and + with the answering machine detection enabled. Unit: Percent Valid metric filter key: + ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: + Campaign, Agent, Answering Machine Detection Status, Disconnect Reason Answering Machine + Detection Status and Disconnect Reason are valid filters but not valid groupings. 
UI name: + Delivery attempt disposition rate FLOWS_OUTCOME Unit: Count Valid groupings and filters: + Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, + Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource + ID, Initiation method, Resource published timestamp UI name: Flows outcome FLOWS_STARTED + Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows started - MAX_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, + HUMAN_ANSWERED_CALLS This metric is available only for contacts analyzed by outbound + campaigns analytics, and with the answering machine detection enabled. Unit: Count Valid + groupings and filters: Campaign, Agent UI name: Human answered MAX_FLOW_TIME Unit: + Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, + Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, + Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI + name: Maximum flow time MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Maximum queued time + MIN_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, - Initiation method, Resource published timestamp UI name: Maximum flow time - MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing - Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI - name: Maximum queued time MIN_FLOW_TIME Unit: Seconds Valid groupings and filters: - Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, - Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource - ID, Initiation method, Resource published timestamp UI name: Minimum flow time + Initiation method, Resource published timestamp UI name: Minimum flow time PERCENT_CASES_FIRST_CONTACT_RESOLVED Unit: Percent Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved on first contact PERCENT_CONTACTS_STEP_EXPIRED Unit: Percent Valid groupings and filters: - Queue, RoutingStepExpression UI name: Not available PERCENT_CONTACTS_STEP_JOINED Unit: - Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: Not available - PERCENT_FLOWS_OUTCOME Unit: Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid - groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows - module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome - type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows - outcome percentage. The FLOWS_OUTCOME_TYPE is not a valid grouping. - PERCENT_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens - conversational analytics. 
Unit: Percentage Valid groupings and filters: Queue, Channel, - Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in - Connect UI name: Non-talk time percent PERCENT_TALK_TIME This metric is available only - for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid - groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but + not on the Historical Metrics UI. PERCENT_CONTACTS_STEP_JOINED Unit: Percent Valid + groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in + Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_FLOWS_OUTCOME Unit: + Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid groupings and filters: Channel, + contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next + resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, + Initiation method, Resource published timestamp UI name: Flows outcome percentage. The + FLOWS_OUTCOME_TYPE is not a valid grouping. PERCENT_NON_TALK_TIME This metric is + available only for contacts analyzed by Contact Lens conversational analytics. Unit: + Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent + Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Non-talk time + percent PERCENT_TALK_TIME This metric is available only for contacts analyzed by Contact + Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, + Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Talk time percent PERCENT_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, @@ -5762,27 +5825,40 @@ definitions in the Amazon Connect Administrator Guide. Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for \"Less than\"). UI name: Service level X STEP_CONTACTS_QUEUED Unit: Count Valid - groupings and filters: Queue, RoutingStepExpression UI name: Not available - SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, - Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: After contact work time - SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: INITIATION_METHOD. This - metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | - CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent - Hierarchy UI name: Agent API connecting time The Negate key in Metric Level Filters is - not applicable for this metric. 
SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and - filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: - Contact flow time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: - Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent on contact time - SUM_CONTACTS_DISCONNECTED Valid metric filter key: DISCONNECT_REASON Unit: Count Valid - groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in + Real-time Metrics UI but not on the Historical Metrics UI. SUM_AFTER_CONTACT_WORK_TIME + Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent + Hierarchy, Q in Connect UI name: After contact work time SUM_CONNECTING_TIME_AGENT Unit: + Seconds Valid metric filter key: INITIATION_METHOD. This metric only supports the following + filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and + filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent API + connecting time The Negate key in Metric Level Filters is not applicable for this metric. + SUM_CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| Incoming | + Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: + Contact abandoned SUM_CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: + Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect + Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in + seconds. For Comparison, you must enter LT (for \"Less than\"). UI name: Contacts + abandoned in X seconds SUM_CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and + filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in + Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), + in seconds. For Comparison, you must enter LT (for \"Less than\"). 
UI name: Contacts + answered in X seconds SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact flow + time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, + Agent, Agent Hierarchy UI name: Agent on contact time SUM_CONTACTS_DISCONNECTED Valid + metric filter key: DISCONNECT_REASON Unit: Count Valid groupings and filters: Queue, + Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contact disconnected - SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Queue, Channel, - Routing Profile, Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: - Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent - Hierarchy, Q in Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid + SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, + Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in - Connect UI name: Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings - and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time + Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid groupings and + filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: + Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings and filters: + Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time SUM_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Agent interaction and hold time SUM_INTERACTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, @@ -5808,8 +5884,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are returned apply to the metrics for each queue. They are not aggregated for all queues. If no grouping is specified, a summary of all metrics is returned. Valid grouping keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE - | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | CASE_TEMPLATE_ARN | CASE_STATUS - | CHANNEL | contact/segmentAttributes/connect:Subtype | FLOWS_RESOURCE_ID | + | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | + ANSWERING_MACHINE_DETECTION_STATUS | CAMPAIGN | CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL | + contact/segmentAttributes/connect:Subtype | DISCONNECT_REASON | FLOWS_RESOURCE_ID | FLOWS_MODULE_RESOURCE_ID | FLOW_TYPE | FLOWS_OUTCOME_TYPE | INITIATION_METHOD | Q_CONNECT_ENABLED | QUEUE | RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE | ROUTING_STEP_EXPRESSION @@ -5999,7 +6076,20 @@ end Imports a claimed phone number from an external service, such as Amazon Pinpoint, into an Amazon Connect instance. You can call this API only in the same Amazon Web Services Region -where the Amazon Connect instance was created. +where the Amazon Connect instance was created. Call the DescribePhoneNumber API to verify +the status of a previous ImportPhoneNumber operation. If you plan to claim or import +numbers and then release numbers frequently, contact us for a service quota exception. 
+Otherwise, it is possible you will be blocked from claiming and releasing any more numbers +until up to 180 days past the oldest number released has expired. By default you can +claim or import and then release up to 200% of your maximum number of active phone numbers. +If you claim or import and then release phone numbers using the UI or API during a rolling +180 day cycle that exceeds 200% of your phone number service level quota, you will be +blocked from claiming or importing any more numbers until 180 days past the oldest number +released has expired. For example, if you already have 99 claimed or imported numbers and +a service level quota of 99 phone numbers, and in any 180 day period you release 99, claim +99, and then release 99, you will have exceeded the 200% limit. At that point you are +blocked from claiming any more numbers until you open an Amazon Web Services Support +ticket. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -6180,6 +6270,48 @@ function list_approved_origins( ) end +""" + list_authentication_profiles(instance_id) + list_authentication_profiles(instance_id, params::Dict{String,<:Any}) + +This API is in preview release for Amazon Connect and is subject to change. To request +access to this API, contact Amazon Web Services Support. Provides summary information about +the authentication profiles in a specified Amazon Connect instance. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +""" +function list_authentication_profiles( + InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "GET", + "/authentication-profiles-summary/$(InstanceId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_authentication_profiles( + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "GET", + "/authentication-profiles-summary/$(InstanceId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_bots(instance_id, lex_version) list_bots(instance_id, lex_version, params::Dict{String,<:Any}) @@ -8318,6 +8450,51 @@ function resume_contact_recording( ) end +""" + search_agent_statuses(instance_id) + search_agent_statuses(instance_id, params::Dict{String,<:Any}) + +Searches AgentStatuses in an Amazon Connect instance, with optional filtering. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instanceId + in the ARN of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return per page. +- `"NextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +- `"SearchCriteria"`: The search criteria to be used to return agent statuses. +- `"SearchFilter"`: Filters to be applied to search results. 
+""" +function search_agent_statuses( + InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "POST", + "/search-agent-statuses", + Dict{String,Any}("InstanceId" => InstanceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_agent_statuses( + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/search-agent-statuses", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("InstanceId" => InstanceId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ search_available_phone_numbers(phone_number_country_code, phone_number_type) search_available_phone_numbers(phone_number_country_code, phone_number_type, params::Dict{String,<:Any}) @@ -8770,7 +8947,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys previous response in the next request to retrieve the next set of results. - `"ResourceTypes"`: The list of resource types to be used to search tags from. If not provided or if any empty list is provided, this API will search from all supported resource - types. + types. Supported resource types AGENT ROUTING_PROFILE STANDARD_QUEUE + SECURITY_PROFILE OPERATING_HOURS PROMPT CONTACT_FLOW FLOW_MODULE - `"SearchCriteria"`: The search criteria to be used to return tags. """ function search_resource_tags(InstanceId; aws_config::AbstractAWSConfig=global_aws_config()) @@ -8894,6 +9072,53 @@ function search_security_profiles( ) end +""" + search_user_hierarchy_groups(instance_id) + search_user_hierarchy_groups(instance_id, params::Dict{String,<:Any}) + +Searches UserHierarchyGroups in an Amazon Connect instance, with optional filtering. The +UserHierarchyGroup with \"LevelId\": \"0\" is the foundation for building levels on top of +an instance. It is not user-definable, nor is it visible in the UI. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instanceId + in the ARN of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return per page. +- `"NextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +- `"SearchCriteria"`: The search criteria to be used to return UserHierarchyGroups. +- `"SearchFilter"`: Filters to be applied to search results. 
+""" +function search_user_hierarchy_groups( + InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "POST", + "/search-user-hierarchy-groups", + Dict{String,Any}("InstanceId" => InstanceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_user_hierarchy_groups( + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/search-user-hierarchy-groups", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("InstanceId" => InstanceId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ search_users(instance_id) search_users(instance_id, params::Dict{String,<:Any}) @@ -9054,13 +9279,13 @@ end start_attached_file_upload(file_name, file_size_in_bytes, file_use_case_type, instance_id, associated_resource_arn, params::Dict{String,<:Any}) Provides a pre-signed Amazon S3 URL in response for uploading your content. You may only -use this API to upload attachments to a Connect Case. +use this API to upload attachments to an Amazon Connect Case. # Arguments - `file_name`: A case-sensitive name of the attached file being uploaded. - `file_size_in_bytes`: The size of the attached file in bytes. - `file_use_case_type`: The use case for the file. -- `instance_id`: The unique identifier of the Connect instance. +- `instance_id`: The unique identifier of the Amazon Connect instance. - `associated_resource_arn`: The resource to which the attached file is (being) uploaded to. Cases are the only current supported resource. This value must be a valid ARN. @@ -9141,8 +9366,9 @@ throttling returns a TooManyRequests exception. The quota for concurrent activ exceeded. Active chat throttling returns a LimitExceededException. If you use the ChatDurationInMinutes parameter and receive a 400 error, your account may not support the ability to configure custom chat durations. For more information, contact Amazon Web -Services Support. For more information about chat, see Chat in the Amazon Connect -Administrator Guide. +Services Support. For more information about chat, see the following topics in the Amazon +Connect Administrator Guide: Concepts: Web and mobile messaging capabilities in Amazon +Connect Amazon Connect Chat security best practices # Arguments - `contact_flow_id`: The identifier of the flow for initiating the chat. To see the @@ -9384,7 +9610,9 @@ end Initiates real-time message streaming for a new chat contact. For more information about message streaming, see Enable real-time chat message streaming in the Amazon Connect -Administrator Guide. +Administrator Guide. For more information about chat, see the following topics in the +Amazon Connect Administrator Guide: Concepts: Web and mobile messaging capabilities in +Amazon Connect Amazon Connect Chat security best practices # Arguments - `chat_streaming_configuration`: The streaming configuration, such as the Amazon SNS @@ -9674,8 +9902,8 @@ Amazon Connect instance (specified as InstanceId). # Arguments - `contact_flow_id`: The identifier of the flow for the call. To see the ContactFlowId in - the Amazon Connect admin website, on the navigation menu go to Routing, Contact Flows. - Choose the flow. On the flow page, under the name of the flow, choose Show additional flow + the Amazon Connect admin website, on the navigation menu go to Routing, Flows. Choose the + flow. On the flow page, under the name of the flow, choose Show additional flow information. 
The ContactFlowId is the last part of the ARN, shown here in bold: arn:aws:connect:us-west-2:xxxxxxxxxxxx:instance/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/contact -flow/846ec553-a005-41c0-8341-xxxxxxxxxxxx @@ -10317,6 +10545,61 @@ function update_agent_status( ) end +""" + update_authentication_profile(authentication_profile_id, instance_id) + update_authentication_profile(authentication_profile_id, instance_id, params::Dict{String,<:Any}) + +This API is in preview release for Amazon Connect and is subject to change. To request +access to this API, contact Amazon Web Services Support. Updates the selected +authentication profile. + +# Arguments +- `authentication_profile_id`: A unique identifier for the authentication profile. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AllowedIps"`: A list of IP address range strings that are allowed to access the + instance. For more information on how to configure IP addresses, seeConfigure session + timeouts in the Amazon Connect Administrator Guide. +- `"BlockedIps"`: A list of IP address range strings that are blocked from accessing the + instance. For more information on how to configure IP addresses, For more information on + how to configure IP addresses, see Configure IP-based access control in the Amazon Connect + Administrator Guide. +- `"Description"`: The description for the authentication profile. +- `"Name"`: The name for the authentication profile. +- `"PeriodicSessionDuration"`: The short lived session duration configuration for users + logged in to Amazon Connect, in minutes. This value determines the maximum possible time + before an agent is authenticated. For more information, For more information on how to + configure IP addresses, see Configure session timeouts in the Amazon Connect Administrator + Guide. +""" +function update_authentication_profile( + AuthenticationProfileId, InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "POST", + "/authentication-profiles/$(InstanceId)/$(AuthenticationProfileId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_authentication_profile( + AuthenticationProfileId, + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/authentication-profiles/$(InstanceId)/$(AuthenticationProfileId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_contact(contact_id, instance_id) update_contact(contact_id, instance_id, params::Dict{String,<:Any}) diff --git a/src/services/connect_contact_lens.jl b/src/services/connect_contact_lens.jl index 4c4d4639c4..99702556ce 100644 --- a/src/services/connect_contact_lens.jl +++ b/src/services/connect_contact_lens.jl @@ -16,7 +16,7 @@ Provides a list of analysis segments for a real-time analysis session. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"MaxResults"`: The maximimum number of results to return per page. +- `"MaxResults"`: The maximum number of results to return per page. - `"NextToken"`: The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. 
""" diff --git a/src/services/controltower.jl b/src/services/controltower.jl index d5f92b0b2e..c7e452f056 100644 --- a/src/services/controltower.jl +++ b/src/services/controltower.jl @@ -144,10 +144,10 @@ end disable_control(control_identifier, target_identifier) disable_control(control_identifier, target_identifier, params::Dict{String,<:Any}) -This API call turns off a control. It starts an asynchronous operation that deletes AWS -resources on the specified organizational unit and the accounts it contains. The resources -will vary according to the control that you specify. For usage examples, see the Amazon -Web Services Control Tower User Guide . +This API call turns off a control. It starts an asynchronous operation that deletes Amazon +Web Services resources on the specified organizational unit and the accounts it contains. +The resources will vary according to the control that you specify. For usage examples, see +the Controls Reference Guide . # Arguments - `control_identifier`: The ARN of the control. Only Strongly recommended and Elective @@ -265,7 +265,7 @@ end This API call activates a control. It starts an asynchronous operation that creates Amazon Web Services resources on the specified organizational unit and the accounts it contains. The resources created will vary according to the control that you specify. For usage -examples, see the Amazon Web Services Control Tower User Guide . +examples, see the Controls Reference Guide . # Arguments - `control_identifier`: The ARN of the control. Only Strongly recommended and Elective @@ -406,7 +406,7 @@ end Returns the status of a particular EnableControl or DisableControl operation. Displays a message in case of error. Details for an operation are available for 90 days. For usage -examples, see the Amazon Web Services Control Tower User Guide . +examples, see the Controls Reference Guide . # Arguments - `operation_identifier`: The ID of the asynchronous operation, which is used to track @@ -490,8 +490,8 @@ end get_enabled_control(enabled_control_identifier) get_enabled_control(enabled_control_identifier, params::Dict{String,<:Any}) -Retrieves details about an enabled control. For usage examples, see the Amazon Web -Services Control Tower User Guide . +Retrieves details about an enabled control. For usage examples, see the Controls Reference +Guide . # Arguments - `enabled_control_identifier`: The controlIdentifier of the enabled control. @@ -644,7 +644,8 @@ end list_control_operations() list_control_operations(params::Dict{String,<:Any}) -Provides a list of operations in progress or queued. +Provides a list of operations in progress or queued. For usage examples, see +ListControlOperation examples. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -713,12 +714,12 @@ end list_enabled_controls(params::Dict{String,<:Any}) Lists the controls enabled by Amazon Web Services Control Tower on the specified -organizational unit and the accounts it contains. For usage examples, see the Amazon Web -Services Control Tower User Guide . +organizational unit and the accounts it contains. For usage examples, see the Controls +Reference Guide . # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"filter"`: An input filter for the ListCEnabledControls API that lets you select the +- `"filter"`: An input filter for the ListEnabledControls API that lets you select the types of control operations to view. 
- `"maxResults"`: How many results to return per API call. - `"nextToken"`: The token to continue the list from a previous API call with the same @@ -746,6 +747,41 @@ function list_enabled_controls( ) end +""" + list_landing_zone_operations() + list_landing_zone_operations(params::Dict{String,<:Any}) + +Lists all landing zone operations from the past 90 days. Results are sorted by time, with +the most recent operation first. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filter"`: An input filter for the ListLandingZoneOperations API that lets you select + the types of landing zone operations to view. +- `"maxResults"`: How many results to return per API call. +- `"nextToken"`: The token to continue the list from a previous API call with the same + parameters. +""" +function list_landing_zone_operations(; aws_config::AbstractAWSConfig=global_aws_config()) + return controltower( + "POST", + "/list-landingzone-operations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_landing_zone_operations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return controltower( + "POST", + "/list-landingzone-operations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_landing_zones() list_landing_zones(params::Dict{String,<:Any}) @@ -781,8 +817,8 @@ end list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) -Returns a list of tags associated with the resource. For usage examples, see the Amazon -Web Services Control Tower User Guide . +Returns a list of tags associated with the resource. For usage examples, see the Controls +Reference Guide . # Arguments - `resource_arn`: The ARN of the resource. @@ -861,7 +897,10 @@ end reset_landing_zone(landing_zone_identifier, params::Dict{String,<:Any}) This API call resets a landing zone. It starts an asynchronous operation that resets the -landing zone to the parameters specified in its original configuration. +landing zone to the parameters specified in the original configuration, which you specified +in the manifest file. Nothing in the manifest file's original landing zone configuration is +changed during the reset process, by default. This API is not the same as a rollback of a +landing zone version, which is not a supported operation. # Arguments - `landing_zone_identifier`: The unique identifier of the landing zone. @@ -902,8 +941,7 @@ end tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) -Applies tags to a resource. For usage examples, see the Amazon Web Services Control Tower -User Guide . +Applies tags to a resource. For usage examples, see the Controls Reference Guide . # Arguments - `resource_arn`: The ARN of the resource to be tagged. @@ -938,8 +976,7 @@ end untag_resource(resource_arn, tag_keys) untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) -Removes tags from a resource. For usage examples, see the Amazon Web Services Control -Tower User Guide . +Removes tags from a resource. For usage examples, see the Controls Reference Guide . # Arguments - `resource_arn`: The ARN of the resource. @@ -1036,11 +1073,11 @@ end EnablementStatus of SUCCEEDED, supply parameters that are different from the currently configured parameters. Otherwise, Amazon Web Services Control Tower will not accept the request. 
If the enabled control shows an EnablementStatus of FAILED, Amazon Web Services -Control Tower will update the control to match any valid parameters that you supply. If the +Control Tower updates the control to match any valid parameters that you supply. If the DriftSummary status for the control shows as DRIFTED, you cannot call this API. Instead, you can update the control by calling DisableControl and again calling EnableControl, or -you can run an extending governance operation. For usage examples, see the Amazon Web -Services Control Tower User Guide +you can run an extending governance operation. For usage examples, see the Controls +Reference Guide . # Arguments - `enabled_control_identifier`: The ARN of the enabled control that will be updated. @@ -1095,8 +1132,10 @@ specified in the updated manifest file. # Arguments - `landing_zone_identifier`: The unique identifier of the landing zone. -- `manifest`: The manifest JSON file is a text file that describes your Amazon Web Services - resources. For examples, review Launch your landing zone. +- `manifest`: The manifest file (JSON) is a text file that describes your Amazon Web + Services resources. For an example, review Launch your landing zone. The example manifest + file contains each of the available parameters. The schema for the landing zone's JSON + manifest file is not published, by design. - `version`: The landing zone version, for example, 3.2. """ diff --git a/src/services/datazone.jl b/src/services/datazone.jl index 522ae7d8ab..bf5d439a32 100644 --- a/src/services/datazone.jl +++ b/src/services/datazone.jl @@ -281,6 +281,70 @@ function create_asset( ) end +""" + create_asset_filter(asset_identifier, configuration, domain_identifier, name) + create_asset_filter(asset_identifier, configuration, domain_identifier, name, params::Dict{String,<:Any}) + +Creates a data asset filter. + +# Arguments +- `asset_identifier`: The ID of the data asset. +- `configuration`: The configuration of the asset filter. +- `domain_identifier`: The ID of the domain in which you want to create an asset filter. +- `name`: The name of the asset filter. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. +- `"description"`: The description of the asset filter. 
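
A usage sketch for the new `create_asset_filter` wrapper added here. The domain and asset IDs are hypothetical, and the `configuration` payload is only an illustrative column-filter shape; consult the DataZone AssetFilterConfiguration model for the exact schema:

```julia
using AWS
@service DataZone  # module name assumed from AWS.jl's @service lowercasing convention

# Hypothetical identifiers.
domain_id = "dzd_1234567890"
asset_id = "asset-abc123"

resp = DataZone.create_asset_filter(
    asset_id,
    Dict{String,Any}(  # illustrative column-based filter configuration
        "columnConfiguration" => Dict{String,Any}(
            "includedColumnNames" => ["customer_id", "order_total"],
        ),
    ),
    domain_id,
    "orders-public-columns",
    Dict{String,Any}("description" => "Expose only non-sensitive columns"),
)
```
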
+""" +function create_asset_filter( + assetIdentifier, + configuration, + domainIdentifier, + name; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters", + Dict{String,Any}( + "configuration" => configuration, + "name" => name, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_asset_filter( + assetIdentifier, + configuration, + domainIdentifier, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "configuration" => configuration, + "name" => name, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_asset_revision(domain_identifier, identifier, name) create_asset_revision(domain_identifier, identifier, name, params::Dict{String,<:Any}) @@ -1466,6 +1530,47 @@ function delete_asset( ) end +""" + delete_asset_filter(asset_identifier, domain_identifier, identifier) + delete_asset_filter(asset_identifier, domain_identifier, identifier, params::Dict{String,<:Any}) + +Deletes an asset filter. + +# Arguments +- `asset_identifier`: The ID of the data asset. +- `domain_identifier`: The ID of the domain where you want to delete an asset filter. +- `identifier`: The ID of the asset filter that you want to delete. + +""" +function delete_asset_filter( + assetIdentifier, + domainIdentifier, + identifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_asset_filter( + assetIdentifier, + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_asset_type(domain_identifier, identifier) delete_asset_type(domain_identifier, identifier, params::Dict{String,<:Any}) @@ -2233,6 +2338,47 @@ function get_asset( ) end +""" + get_asset_filter(asset_identifier, domain_identifier, identifier) + get_asset_filter(asset_identifier, domain_identifier, identifier, params::Dict{String,<:Any}) + +Gets an asset filter. + +# Arguments +- `asset_identifier`: The ID of the data asset. +- `domain_identifier`: The ID of the domain where you want to get an asset filter. +- `identifier`: The ID of the asset filter. 
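
A companion sketch for retrieving (and, with the same identifiers, deleting) an asset filter; all IDs below are hypothetical:

```julia
using AWS
@service DataZone

# Hypothetical identifiers.
filt = DataZone.get_asset_filter("asset-abc123", "dzd_1234567890", "filter-123")

# Deletion uses the same trio of identifiers.
DataZone.delete_asset_filter("asset-abc123", "dzd_1234567890", "filter-123")
```
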
+ +""" +function get_asset_filter( + assetIdentifier, + domainIdentifier, + identifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_asset_filter( + assetIdentifier, + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_asset_type(domain_identifier, identifier) get_asset_type(domain_identifier, identifier, params::Dict{String,<:Any}) @@ -2751,6 +2897,48 @@ function get_iam_portal_login_url( ) end +""" + get_lineage_node(domain_identifier, identifier) + get_lineage_node(domain_identifier, identifier, params::Dict{String,<:Any}) + +Gets the data lineage node. + +# Arguments +- `domain_identifier`: The ID of the domain in which you want to get the data lineage node. +- `identifier`: The ID of the data lineage node that you want to get. Both, a lineage node + identifier generated by Amazon DataZone and a sourceIdentifier of the lineage node are + supported. If sourceIdentifier is greater than 1800 characters, you can use lineage node + identifier generated by Amazon DataZone to get the node details. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"timestamp"`: The event time stamp for which you want to get the data lineage node. +""" +function get_lineage_node( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/lineage/nodes/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_lineage_node( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/lineage/nodes/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_listing(domain_identifier, identifier) get_listing(domain_identifier, identifier, params::Dict{String,<:Any}) @@ -3109,6 +3297,54 @@ function get_user_profile( ) end +""" + list_asset_filters(asset_identifier, domain_identifier) + list_asset_filters(asset_identifier, domain_identifier, params::Dict{String,<:Any}) + +Lists asset filters. + +# Arguments +- `asset_identifier`: The ID of the data asset. +- `domain_identifier`: The ID of the domain where you want to list asset filters. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of asset filters to return in a single call to + ListAssetFilters. When the number of asset filters to be listed is greater than the value + of MaxResults, the response contains a NextToken value that you can use in a subsequent + call to ListAssetFilters to list the next set of asset filters. +- `"nextToken"`: When the number of asset filters is greater than the default value for the + MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than + the number of asset filters, the response includes a pagination token named NextToken. 
You + can specify this NextToken value in a subsequent call to ListAssetFilters to list the next + set of asset filters. +- `"status"`: The status of the asset filter. +""" +function list_asset_filters( + assetIdentifier, domainIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_asset_filters( + assetIdentifier, + domainIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_asset_revisions(domain_identifier, identifier) list_asset_revisions(domain_identifier, identifier, params::Dict{String,<:Any}) @@ -3602,6 +3838,62 @@ function list_environments( ) end +""" + list_lineage_node_history(domain_identifier, identifier) + list_lineage_node_history(domain_identifier, identifier, params::Dict{String,<:Any}) + +Lists the history of the specified data lineage node. + +# Arguments +- `domain_identifier`: The ID of the domain where you want to list the history of the + specified data lineage node. +- `identifier`: The ID of the data lineage node whose history you want to list. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"direction"`: The direction of the data lineage node refers to the lineage node having + neighbors in that direction. For example, if direction is UPSTREAM, the + ListLineageNodeHistory API responds with historical versions with upstream neighbors only. +- `"maxResults"`: The maximum number of history items to return in a single call to + ListLineageNodeHistory. When the number of memberships to be listed is greater than the + value of MaxResults, the response contains a NextToken value that you can use in a + subsequent call to ListLineageNodeHistory to list the next set of items. +- `"nextToken"`: When the number of history items is greater than the default value for the + MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than + the number of items, the response includes a pagination token named NextToken. You can + specify this NextToken value in a subsequent call to ListLineageNodeHistory to list the + next set of items. +- `"sortOrder"`: The order by which you want data lineage node history to be sorted. +- `"timestampGTE"`: Specifies whether the action is to return data lineage node history + from the time after the event timestamp. +- `"timestampLTE"`: Specifies whether the action is to return data lineage node history + from the time prior of the event timestamp. 
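
A pagination sketch for this operation using the `direction`, `maxResults`, and `nextToken` keys listed above. The domain and lineage node IDs are hypothetical, and the `nodes`/`nextToken` response field names are assumed from the service model:

```julia
using AWS
@service DataZone

history = Any[]
params = Dict{String,Any}("direction" => "UPSTREAM", "maxResults" => 50)
while true
    # Hypothetical domain and lineage node identifiers.
    resp = DataZone.list_lineage_node_history("dzd_1234567890", "lineage-node-abc", params)
    append!(history, get(resp, "nodes", Any[]))
    token = get(resp, "nextToken", nothing)
    token === nothing && break
    params["nextToken"] = token
end
```
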
+""" +function list_lineage_node_history( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/lineage/nodes/$(identifier)/history"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_lineage_node_history( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/lineage/nodes/$(identifier)/history", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_metadata_generation_runs(domain_identifier) list_metadata_generation_runs(domain_identifier, params::Dict{String,<:Any}) @@ -4102,6 +4394,54 @@ function list_time_series_data_points( ) end +""" + post_lineage_event(domain_identifier, event) + post_lineage_event(domain_identifier, event, params::Dict{String,<:Any}) + +Posts a data lineage event. + +# Arguments +- `domain_identifier`: The ID of the domain where you want to post a data lineage event. +- `event`: The data lineage event that you want to post. Only open-lineage run event are + supported as events. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. +""" +function post_lineage_event( + domainIdentifier, event; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/lineage/events", + Dict{String,Any}("event" => event, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function post_lineage_event( + domainIdentifier, + event, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/lineage/events", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("event" => event, "clientToken" => string(uuid4())), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ post_time_series_data_points(domain_identifier, entity_identifier, entity_type, forms) post_time_series_data_points(domain_identifier, entity_identifier, entity_type, forms, params::Dict{String,<:Any}) @@ -4173,6 +4513,7 @@ Writes the configuration for the specified environment blueprint in Amazon DataZ # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"manageAccessRoleArn"`: The ARN of the manage access role. +- `"provisioningConfigurations"`: The provisioning configuration of a blueprint. - `"provisioningRoleArn"`: The ARN of the provisioning role. - `"regionalParameters"`: The regional parameters in the environment blueprint. """ @@ -4796,6 +5137,52 @@ function untag_resource( ) end +""" + update_asset_filter(asset_identifier, domain_identifier, identifier) + update_asset_filter(asset_identifier, domain_identifier, identifier, params::Dict{String,<:Any}) + +Updates an asset filter. + +# Arguments +- `asset_identifier`: The ID of the data asset. +- `domain_identifier`: The ID of the domain where you want to update an asset filter. +- `identifier`: The ID of the asset filter. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"configuration"`: The configuration of the asset filter. 
+- `"description"`: The description of the asset filter. +- `"name"`: The name of the asset filter. +""" +function update_asset_filter( + assetIdentifier, + domainIdentifier, + identifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PATCH", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_asset_filter( + assetIdentifier, + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PATCH", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_data_source(domain_identifier, identifier) update_data_source(domain_identifier, identifier, params::Dict{String,<:Any}) diff --git a/src/services/direct_connect.jl b/src/services/direct_connect.jl index 2fe23f9070..1fb6be1354 100644 --- a/src/services/direct_connect.jl +++ b/src/services/direct_connect.jl @@ -69,7 +69,7 @@ end allocate_connection_on_interconnect(bandwidth, connection_name, interconnect_id, owner_account, vlan) allocate_connection_on_interconnect(bandwidth, connection_name, interconnect_id, owner_account, vlan, params::Dict{String,<:Any}) -Deprecated. Use AllocateHostedConnection instead. Creates a hosted connection on an + Deprecated. Use AllocateHostedConnection instead. Creates a hosted connection on an interconnect. Allocates a VLAN number and a specified amount of bandwidth for use by a hosted connection on the specified interconnect. Intended for use by Direct Connect Partners only. @@ -149,9 +149,9 @@ Intended for use by Direct Connect Partners only. # Arguments - `bandwidth`: The bandwidth of the connection. The possible values are 50Mbps, 100Mbps, - 200Mbps, 300Mbps, 400Mbps, 500Mbps, 1Gbps, 2Gbps, 5Gbps, and 10Gbps. Note that only those - Direct Connect Partners who have met specific requirements are allowed to create a 1Gbps, - 2Gbps, 5Gbps or 10Gbps hosted connection. + 200Mbps, 300Mbps, 400Mbps, 500Mbps, 1Gbps, 2Gbps, 5Gbps, 10Gbps, and 25Gbps. Note that only + those Direct Connect Partners who have met specific requirements are allowed to create a + 1Gbps, 2Gbps, 5Gbps, 10Gbps, or 25Gbps hosted connection. - `connection_id`: The ID of the interconnect or LAG. - `connection_name`: The name of the hosted connection. - `owner_account`: The ID of the Amazon Web Services account ID of the customer for the @@ -1098,7 +1098,7 @@ the VLAN assigned to them by the Direct Connect Partner. Intended for use by Di Connect Partners only. # Arguments -- `bandwidth`: The port bandwidth, in Gbps. The possible values are 1 and 10. +- `bandwidth`: The port bandwidth, in Gbps. The possible values are 1, 10, and 100. - `interconnect_name`: The name of the interconnect. - `location`: The location of the interconnect. @@ -1155,28 +1155,28 @@ Creates a link aggregation group (LAG) with the specified number of bundled phys dedicated connections between the customer network and a specific Direct Connect location. A LAG is a logical interface that uses the Link Aggregation Control Protocol (LACP) to aggregate multiple interfaces, enabling you to treat them as a single interface. All -connections in a LAG must use the same bandwidth (either 1Gbps or 10Gbps) and must -terminate at the same Direct Connect endpoint. You can have up to 10 dedicated connections -per LAG. 
Regardless of this limit, if you request more connections for the LAG than Direct
-Connect can allocate on a single endpoint, no LAG is created. You can specify an existing
-physical dedicated connection or interconnect to include in the LAG (which counts towards
-the total number of connections). Doing so interrupts the current physical dedicated
-connection, and re-establishes them as a member of the LAG. The LAG will be created on the
-same Direct Connect endpoint to which the dedicated connection terminates. Any virtual
-interfaces associated with the dedicated connection are automatically disassociated and
-re-associated with the LAG. The connection ID does not change. If the Amazon Web Services
-account used to create a LAG is a registered Direct Connect Partner, the LAG is
-automatically enabled to host sub-connections. For a LAG owned by a partner, any associated
-virtual interfaces cannot be directly configured.
+connections in a LAG must use the same bandwidth (either 1Gbps, 10Gbps, 100Gbps, or
+400Gbps) and must terminate at the same Direct Connect endpoint. You can have up to 10
+dedicated connections per location. Regardless of this limit, if you request more
+connections for the LAG than Direct Connect can allocate on a single endpoint, no LAG is
+created. You can specify an existing physical dedicated connection or interconnect to
+include in the LAG (which counts towards the total number of connections). Doing so
+interrupts the current physical dedicated connection, and re-establishes them as a member
+of the LAG. The LAG will be created on the same Direct Connect endpoint to which the
+dedicated connection terminates. Any virtual interfaces associated with the dedicated
+connection are automatically disassociated and re-associated with the LAG. The connection
+ID does not change. If the Amazon Web Services account used to create a LAG is a registered
+Direct Connect Partner, the LAG is automatically enabled to host sub-connections. For a LAG
+owned by a partner, any associated virtual interfaces cannot be directly configured.
 
 # Arguments
 - `connections_bandwidth`: The bandwidth of the individual physical dedicated connections
-  bundled by the LAG. The possible values are 1Gbps and 10Gbps.
+  bundled by the LAG. The possible values are 1Gbps, 10Gbps, 100Gbps, and 400Gbps.
 - `lag_name`: The name of the LAG.
 - `location`: The location for the LAG.
 - `number_of_connections`: The number of physical dedicated connections initially
   provisioned and bundled by the LAG. You can have a maximum of four connections when the
-  port speed is 1G or 10G, or two when the port speed is 100G.
+  port speed is 1Gbps or 10Gbps, or two when the port speed is 100Gbps or 400Gbps.
 
 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -1245,7 +1245,7 @@ gateway or a Virtual Private Gateway (VGW). Connecting the private virtual inter
 Direct Connect gateway enables the possibility for connecting to multiple VPCs, including
 VPCs in different Amazon Web Services Regions. Connecting the private virtual interface to
 a VGW only provides access to a single VPC within the same Region. Setting the MTU of a
-virtual interface to 9001 (jumbo frames) can cause an update to the underlying physical
+virtual interface to 8500 (jumbo frames) can cause an update to the underlying physical
 connection if it wasn't updated to support jumbo frames. 
Updating the connection disrupts network connectivity for all virtual interfaces associated with the connection for up to 30 seconds. To check whether your connection supports jumbo frames, call DescribeConnections. @@ -1691,7 +1691,7 @@ end describe_connection_loa(connection_id) describe_connection_loa(connection_id, params::Dict{String,<:Any}) -Deprecated. Use DescribeLoa instead. Gets the LOA-CFA for a connection. The Letter of + Deprecated. Use DescribeLoa instead. Gets the LOA-CFA for a connection. The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that your APN partner or service provider uses when establishing your cross connect to Amazon Web Services at the colocation facility. For more information, see Requesting Cross Connects at @@ -1763,7 +1763,7 @@ end describe_connections_on_interconnect(interconnect_id) describe_connections_on_interconnect(interconnect_id, params::Dict{String,<:Any}) -Deprecated. Use DescribeHostedConnections instead. Lists the connections that have been + Deprecated. Use DescribeHostedConnections instead. Lists the connections that have been provisioned on the specified interconnect. Intended for use by Direct Connect Partners only. @@ -2017,7 +2017,7 @@ end describe_interconnect_loa(interconnect_id) describe_interconnect_loa(interconnect_id, params::Dict{String,<:Any}) -Deprecated. Use DescribeLoa instead. Gets the LOA-CFA for the specified interconnect. The + Deprecated. Use DescribeLoa instead. Gets the LOA-CFA for the specified interconnect. The Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that is used when establishing your cross connect to Amazon Web Services at the colocation facility. For more information, see Requesting Cross Connects at Direct Connect Locations @@ -2252,8 +2252,10 @@ end describe_virtual_gateways() describe_virtual_gateways(params::Dict{String,<:Any}) -Lists the virtual private gateways owned by the Amazon Web Services account. You can create -one or more Direct Connect private virtual interfaces linked to a virtual private gateway. + Deprecated. Use DescribeVpnGateways instead. See DescribeVPNGateways in the Amazon Elastic +Compute Cloud API Reference. Lists the virtual private gateways owned by the Amazon Web +Services account. You can create one or more Direct Connect private virtual interfaces +linked to a virtual private gateway. """ function describe_virtual_gateways(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -2773,7 +2775,7 @@ end update_virtual_interface_attributes(virtual_interface_id, params::Dict{String,<:Any}) Updates the specified attributes of the specified virtual private interface. Setting the -MTU of a virtual interface to 9001 (jumbo frames) can cause an update to the underlying +MTU of a virtual interface to 8500 (jumbo frames) can cause an update to the underlying physical connection if it wasn't updated to support jumbo frames. Updating the connection disrupts network connectivity for all virtual interfaces associated with the connection for up to 30 seconds. To check whether your connection supports jumbo frames, call @@ -2787,7 +2789,7 @@ DescribeVirtualInterfaces. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"enableSiteLink"`: Indicates whether to enable or disable SiteLink. - `"mtu"`: The maximum transmission unit (MTU), in bytes. The supported values are 1500 and - 9001. The default value is 1500. + 8500. The default value is 1500. 
- `"virtualInterfaceName"`: The name of the virtual private interface. """ function update_virtual_interface_attributes( diff --git a/src/services/dynamodb.jl b/src/services/dynamodb.jl index c7c65bfb39..9fc08a5e46 100644 --- a/src/services/dynamodb.jl +++ b/src/services/dynamodb.jl @@ -11,11 +11,11 @@ using AWS.UUIDs This operation allows you to perform batch reads or writes on data stored in DynamoDB, using PartiQL. Each read statement in a BatchExecuteStatement must specify an equality condition on all key attributes. This enforces that each SELECT statement in a batch -returns at most a single item. The entire batch must consist of either read statements or -write statements, you cannot mix both in one batch. A HTTP 200 response does not mean -that all statements in the BatchExecuteStatement succeeded. Error details for individual -statements can be found under the Error field of the BatchStatementResponse for each -statement. +returns at most a single item. For more information, see Running batch operations with +PartiQL for DynamoDB . The entire batch must consist of either read statements or write +statements, you cannot mix both in one batch. A HTTP 200 response does not mean that all +statements in the BatchExecuteStatement succeeded. Error details for individual statements +can be found under the Error field of the BatchStatementResponse for each statement. # Arguments - `statements`: The list of PartiQL statements representing the batch to run. @@ -165,9 +165,12 @@ internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items -until all items have been processed. If none of the items can be processed due to -insufficient provisioned throughput on all of the tables in the request, then -BatchWriteItem returns a ProvisionedThroughputExceededException. If DynamoDB returns any +until all items have been processed. For tables and indexes with provisioned capacity, if +none of the items can be processed due to insufficient provisioned throughput on all of the +tables in the request, then BatchWriteItem returns a +ProvisionedThroughputExceededException. For all tables and indexes, if none of the items +can be processed due to other throttling scenarios (such as exceeding partition level +limits), then BatchWriteItem returns a ThrottlingException. If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to @@ -733,11 +736,11 @@ does not exist, DynamoDB returns a ResourceNotFoundException. If table is alread DELETING state, no error is returned. For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version). DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the -DELETING state until the table deletion is complete. When you delete a table, any indexes -on that table are also deleted. If you have DynamoDB Streams enabled on the table, then the -corresponding stream on that table goes into the DISABLED state, and the stream is -automatically deleted after 24 hours. 
Use the DescribeTable action to check the status of -the table. +DELETING state until the table deletion is complete. For the full list of table states, see +TableStatus. When you delete a table, any indexes on that table are also deleted. If you +have DynamoDB Streams enabled on the table, then the corresponding stream on that table +goes into the DISABLED state, and the stream is automatically deleted after 24 hours. Use +the DescribeTable action to check the status of the table. # Arguments - `table_name`: The name of the table to delete. You can also provide the Amazon Resource diff --git a/src/services/ec2.jl b/src/services/ec2.jl index 505cf5fd84..f46c081d6d 100644 --- a/src/services/ec2.jl +++ b/src/services/ec2.jl @@ -4108,10 +4108,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"PreserveClientIp"`: Indicates whether your client's IP address is preserved as the - source. The value is true or false. If true, your client's IP address is used when you - connect to a resource. If false, the elastic network interface IP address is used when - you connect to a resource. Default: true +- `"PreserveClientIp"`: Indicates whether the client IP address is preserved as the source. + The following are the possible values. true - Use the client IP address as the source. + false - Use the network interface IP address as the source. Default: false - `"SecurityGroupId"`: One or more security groups to associate with the endpoint. If you don't specify a security group, the default security group for your VPC will be associated with the endpoint. @@ -4343,6 +4342,54 @@ function create_ipam( ) end +""" + create_ipam_external_resource_verification_token(ipam_id) + create_ipam_external_resource_verification_token(ipam_id, params::Dict{String,<:Any}) + +Create a verification token. A verification token is an Amazon Web Services-generated +random value that you can use to prove ownership of an external resource. For example, you +can use a verification token to validate that you control a public IP address range when +you bring an IP address range to Amazon Web Services (BYOIP). + +# Arguments +- `ipam_id`: The ID of the IPAM that will create the token. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. For more information, see Ensuring idempotency. +- `"DryRun"`: A check for whether you have the required permissions for the action without + actually making the request and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"TagSpecification"`: Token tags. 
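
A minimal sketch of creating a verification token; the IPAM ID is hypothetical. The created token is meant to be referenced later when provisioning a BYOIP CIDR, via the new `IpamExternalResourceVerificationTokenId` option on `provision_ipam_pool_cidr` shown further down:

```julia
using AWS
@service EC2

# Hypothetical IPAM ID; the response carries the token ID and value used to
# prove ownership of an external (BYOIP) address range.
resp = EC2.create_ipam_external_resource_verification_token("ipam-0123456789abcdef0")
```
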
+""" +function create_ipam_external_resource_verification_token( + IpamId; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "CreateIpamExternalResourceVerificationToken", + Dict{String,Any}("IpamId" => IpamId, "ClientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_ipam_external_resource_verification_token( + IpamId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "CreateIpamExternalResourceVerificationToken", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("IpamId" => IpamId, "ClientToken" => string(uuid4())), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_ipam_pool(address_family, ipam_scope_id) create_ipam_pool(address_family, ipam_scope_id, params::Dict{String,<:Any}) @@ -4392,13 +4439,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DryRun"`: A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"Locale"`: In IPAM, the locale is the Amazon Web Services Region where you want to make - an IPAM pool available for allocations. Only resources in the same Region as the locale of - the pool can get IP address allocations from the pool. You can only allocate a CIDR for a - VPC, for example, from an IPAM pool that shares a locale with the VPC’s Region. Note that - once you choose a Locale for a pool, you cannot modify it. If you do not choose a locale, - resources in Regions others than the IPAM's home region cannot use CIDRs from this pool. - Possible values: Any Amazon Web Services Region, such as us-east-1. +- `"Locale"`: The locale for the pool should be one of the following: An Amazon Web + Services Region where you want this IPAM pool to be available for allocations. The + network border group for an Amazon Web Services Local Zone where you want this IPAM pool to + be available for allocations (supported Local Zones). This option is only available for + IPAM IPv4 pools in the public scope. If you do not choose a locale, resources in Regions + others than the IPAM's home region cannot use CIDRs from this pool. Possible values: Any + Amazon Web Services Region or supported Amazon Web Services Local Zone. - `"PublicIpSource"`: The IP address source for pools in the public scope. Only used for provisioning IP address CIDRs to pools in the public scope. Default is byoip. For more information, see Create IPv6 pools in the Amazon VPC IPAM User Guide. By default, you can @@ -5546,6 +5593,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DryRun"`: A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"NetworkBorderGroup"`: The Availability Zone (AZ) or Local Zone (LZ) network border + group that the resource that the IP address is assigned to is in. Defaults to an AZ network + border group. For more information on available Local Zones, see Local Zone availability in + the Amazon EC2 User Guide. - `"TagSpecification"`: The key/value combination of a tag assigned to the resource. 
Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for @@ -7549,7 +7600,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys enable Multi-Attach, you can attach the volume to up to 16 Instances built on the Nitro System in the same Availability Zone. This parameter is supported with io1 and io2 volumes only. For more information, see Amazon EBS Multi-Attach in the Amazon EBS User Guide. -- `"OutpostArn"`: The Amazon Resource Name (ARN) of the Outpost. +- `"OutpostArn"`: The Amazon Resource Name (ARN) of the Outpost on which to create the + volume. If you intend to use a volume with an instance running on an outpost, then you must + create the volume on the same outpost as the instance. You can't use a volume created in an + Amazon Web Services Region with an instance on an Amazon Web Services outpost, or the other + way around. - `"Size"`: The size of the volume, in GiBs. You must specify either a snapshot ID or a volume size. If you specify a snapshot, the default is the snapshot size. You can specify a volume size that is equal to or larger than the snapshot size. The following are the @@ -8748,6 +8803,60 @@ function delete_ipam( ) end +""" + delete_ipam_external_resource_verification_token(ipam_external_resource_verification_token_id) + delete_ipam_external_resource_verification_token(ipam_external_resource_verification_token_id, params::Dict{String,<:Any}) + +Delete a verification token. A verification token is an Amazon Web Services-generated +random value that you can use to prove ownership of an external resource. For example, you +can use a verification token to validate that you control a public IP address range when +you bring an IP address range to Amazon Web Services (BYOIP). + +# Arguments +- `ipam_external_resource_verification_token_id`: The token ID. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: A check for whether you have the required permissions for the action without + actually making the request and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +""" +function delete_ipam_external_resource_verification_token( + IpamExternalResourceVerificationTokenId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "DeleteIpamExternalResourceVerificationToken", + Dict{String,Any}( + "IpamExternalResourceVerificationTokenId" => + IpamExternalResourceVerificationTokenId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_ipam_external_resource_verification_token( + IpamExternalResourceVerificationTokenId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "DeleteIpamExternalResourceVerificationToken", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "IpamExternalResourceVerificationTokenId" => + IpamExternalResourceVerificationTokenId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_ipam_pool(ipam_pool_id) delete_ipam_pool(ipam_pool_id, params::Dict{String,<:Any}) @@ -9693,6 +9802,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys - `"DryRun"`: A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"NetworkBorderGroup"`: The Availability Zone (AZ) or Local Zone (LZ) network border + group that the resource that the IP address is assigned to is in. Defaults to an AZ network + border group. For more information on available Local Zones, see Local Zone availability in + the Amazon EC2 User Guide. """ function delete_public_ipv4_pool(PoolId; aws_config::AbstractAWSConfig=global_aws_config()) return ec2( @@ -14555,6 +14668,49 @@ function describe_ipam_byoasn( ) end +""" + describe_ipam_external_resource_verification_tokens() + describe_ipam_external_resource_verification_tokens(params::Dict{String,<:Any}) + +Describe verification tokens. A verification token is an Amazon Web Services-generated +random value that you can use to prove ownership of an external resource. For example, you +can use a verification token to validate that you control a public IP address range when +you bring an IP address range to Amazon Web Services (BYOIP). + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: A check for whether you have the required permissions for the action without + actually making the request and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"Filter"`: One or more filters for the request. For more information about filtering, + see Filtering CLI output. Available filters: ipam-arn + ipam-external-resource-verification-token-arn + ipam-external-resource-verification-token-id ipam-id ipam-region state + status token-name token-value +- `"IpamExternalResourceVerificationTokenId"`: Verification token IDs. +- `"MaxResults"`: The maximum number of tokens to return in one page of results. +- `"NextToken"`: The token for the next page of results. +""" +function describe_ipam_external_resource_verification_tokens(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "DescribeIpamExternalResourceVerificationTokens"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_ipam_external_resource_verification_tokens( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "DescribeIpamExternalResourceVerificationTokens", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_ipam_pools() describe_ipam_pools(params::Dict{String,<:Any}) @@ -15743,8 +15899,11 @@ end describe_placement_groups() describe_placement_groups(params::Dict{String,<:Any}) -Describes the specified placement groups or all of your placement groups. For more -information, see Placement groups in the Amazon EC2 User Guide. +Describes the specified placement groups or all of your placement groups. To describe a +specific placement group that is shared with your account, you must specify the ID of the +placement group using the GroupId parameter. Specifying the name of a shared placement +group using the GroupNames parameter will result in an error. For more information, see +Placement groups in the Amazon EC2 User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -15762,8 +15921,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"groupName"`: The names of the placement groups. Default: Describes all your placement - groups, or only those otherwise specified. +- `"groupName"`: The names of the placement groups. Constraints: You can specify a name + only if the placement group is owned by your account. If a placement group is shared with + your account, specifying the name results in an error. You must use the GroupId parameter + instead. """ function describe_placement_groups(; aws_config::AbstractAWSConfig=global_aws_config()) return ec2( @@ -18215,7 +18376,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys assigned a tag with a specific key, regardless of the tag value. volume-id - The volume ID. volume-type - The Amazon EBS volume type (gp2 | gp3 | io1 | io2 | st1 | sc1| standard) -- `"VolumeId"`: The volume IDs. +- `"VolumeId"`: The volume IDs. If not specified, then all volumes are included in the + response. - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -18240,11 +18402,9 @@ end describe_volumes_modifications() describe_volumes_modifications(params::Dict{String,<:Any}) -Describes the most recent volume modification request for the specified EBS volumes. If a -volume has never been modified, some information in the output will be null. If a volume -has been modified more than once, the output includes only the most recent modification -request. For more information, see Monitor the progress of volume modifications in the -Amazon EBS User Guide. +Describes the most recent volume modification request for the specified EBS volumes. For +more information, see Monitor the progress of volume modifications in the Amazon EBS User +Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -21818,13 +21978,8 @@ end Gets the console output for the specified instance. For Linux instances, the instance console output displays the exact console output that would normally be displayed on a physical monitor attached to a computer. For Windows instances, the instance console output -includes the last three system event log errors. By default, the console output returns -buffered information that was posted shortly after an instance transition state (start, -stop, reboot, or terminate). This information is available for at least one hour after the -most recent post. Only the most recent 64 KB of console output is available. You can -optionally retrieve the latest serial console output at any time during the instance -lifecycle. This option is supported on instance types that use the Nitro hypervisor. For -more information, see Instance console output in the Amazon EC2 User Guide. +includes the last three system event log errors. For more information, see Instance console +output in the Amazon EC2 User Guide. # Arguments - `instance_id`: The ID of the instance. 
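
A short retrieval sketch for the console output, assuming the usual `get_console_output(InstanceId)` wrapper and a hypothetical instance ID; the console text is returned base64-encoded, and the `output` key is assumed from the GetConsoleOutput response shape:

```julia
using AWS, Base64
@service EC2

# Hypothetical instance ID.
resp = EC2.get_console_output("i-0123456789abcdef0")

# "output" holds the base64-encoded console text in the parsed response
# (key name assumed from the EC2 response shape; adjust if yours differs).
console_text = String(base64decode(resp["output"]))
println(console_text)
```
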
@@ -25306,9 +25461,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Virtual Function interface for the instance. There is no way to disable enhanced networking with the Intel 82599 Virtual Function interface at this time. This option is supported only for HVM instances. Specifying this option with a PV instance can make it unreachable. -- `"userData"`: Changes the instance's user data to the specified value. If you are using - an Amazon Web Services SDK or command line tool, base64-encoding is performed for you, and - you can load the text from a file. Otherwise, you must provide base64-encoded text. +- `"userData"`: Changes the instance's user data to the specified value. User data must be + base64-encoded. Depending on the tool or SDK that you're using, the base64-encoding might + be performed for you. For more information, see Work with instance user data. - `"value"`: A new value for the attribute. Use only with the kernel, ramdisk, userData, disableApiTermination, or instanceInitiatedShutdownBehavior attribute. """ @@ -28455,17 +28610,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys \"Cidr\" is required. This value will be null if you specify \"NetmaskLength\" and will be filled in during the provisioning process. - `"CidrAuthorizationContext"`: A signed document that proves that you are authorized to - bring a specified IP address range to Amazon using BYOIP. This option applies to public - pools only. + bring a specified IP address range to Amazon using BYOIP. This option only applies to IPv4 + and IPv6 pools in the public scope. - `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency. - `"DryRun"`: A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"IpamExternalResourceVerificationTokenId"`: Verification token ID. This option only + applies to IPv4 and IPv6 pools in the public scope. - `"NetmaskLength"`: The netmask length of the CIDR you'd like to provision to a pool. Can be used for provisioning Amazon-provided IPv6 CIDRs to top-level pools and for provisioning CIDRs to pools with source pools. Cannot be used to provision BYOIP CIDRs to top-level pools. Either \"NetmaskLength\" or \"Cidr\" is required. +- `"VerificationMethod"`: The method for verifying control of a public IP address range. + Defaults to remarks-x509 if not specified. This option only applies to IPv4 and IPv6 pools + in the public scope. """ function provision_ipam_pool_cidr( IpamPoolId; aws_config::AbstractAWSConfig=global_aws_config() @@ -28516,6 +28676,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DryRun"`: A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"NetworkBorderGroup"`: The Availability Zone (AZ) or Local Zone (LZ) network border + group that the resource that the IP address is assigned to is in. Defaults to an AZ network + border group. For more information on available Local Zones, see Local Zone availability in + the Amazon EC2 User Guide. 
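
A sketch of provisioning a public IPv4 CIDR into a Local Zone network border group using the new option; the pool IDs and border group name are hypothetical:

```julia
using AWS
@service EC2

resp = EC2.provision_public_ipv4_pool_cidr(
    "ipam-pool-0123456789abcdef0",      # IpamPoolId (hypothetical)
    24,                                 # NetmaskLength
    "ipv4pool-ec2-0123456789abcdef0",   # PoolId (hypothetical)
    Dict{String,Any}(
        # New optional parameter: target a Local Zone network border group
        # instead of the default Availability Zone group.
        "NetworkBorderGroup" => "us-west-2-lax-1",
    ),
)
```
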
""" function provision_public_ipv4_pool_cidr( IpamPoolId, NetmaskLength, PoolId; aws_config::AbstractAWSConfig=global_aws_config() @@ -30951,11 +31115,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys launch. You can specify tags for the following resources only: Instances Volumes Spot Instance requests Network interfaces To tag a resource after it has been created, see CreateTags. -- `"UserData"`: The user data script to make available to the instance. For more - information, see Run commands on your Amazon EC2 instance at launch in the Amazon EC2 User - Guide. If you are using a command line tool, base64-encoding is performed for you, and you - can load the text from a file. Otherwise, you must provide base64-encoded text. User data - is limited to 16 KB. +- `"UserData"`: The user data to make available to the instance. User data must be + base64-encoded. Depending on the tool or SDK that you're using, the base64-encoding might + be performed for you. For more information, see Work with instance user data. - `"additionalInfo"`: Reserved. - `"clientToken"`: Unique, case-sensitive identifier you provide to ensure the idempotency of the request. If you do not specify a client token, a randomly generated token is used diff --git a/src/services/eks.jl b/src/services/eks.jl index 0c0fe28615..010cf36605 100644 --- a/src/services/eks.jl +++ b/src/services/eks.jl @@ -394,6 +394,10 @@ Launching Amazon EKS nodes in the Amazon EKS User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"accessConfig"`: The access configuration for the cluster. +- `"bootstrapSelfManagedAddons"`: If you set this value to False when creating a cluster, + the default networking add-ons will not be installed. The default networking addons include + vpc-cni, coredns, and kube-proxy. Use this option when you plan to install third-party + alternative add-ons or self-manage the default networking add-ons. - `"clientRequestToken"`: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. - `"encryptionConfig"`: The encryption configuration for the cluster. diff --git a/src/services/entityresolution.jl b/src/services/entityresolution.jl index 6ad0a84154..fb58e8cb54 100644 --- a/src/services/entityresolution.jl +++ b/src/services/entityresolution.jl @@ -17,7 +17,9 @@ GetPolicy API. - `arn`: The Amazon Resource Name (ARN) of the resource that will be accessed by the principal. - `effect`: Determines whether the permissions specified in the policy are to be allowed - (Allow) or denied (Deny). + (Allow) or denied (Deny). If you set the value of the effect parameter to Deny for the + AddPolicyStatement operation, you must also set the value of the effect parameter in the + policy to Deny for the PutPolicy operation. - `principal`: The Amazon Web Services service or Amazon Web Services account that can access the resource defined as ARN. 
- `statement_id`: A statement identifier that differentiates the statement from others in @@ -116,20 +118,18 @@ function batch_delete_unique_id( end """ - create_id_mapping_workflow(id_mapping_techniques, input_source_config, role_arn, workflow_name) - create_id_mapping_workflow(id_mapping_techniques, input_source_config, role_arn, workflow_name, params::Dict{String,<:Any}) + create_id_mapping_workflow(id_mapping_techniques, input_source_config, workflow_name) + create_id_mapping_workflow(id_mapping_techniques, input_source_config, workflow_name, params::Dict{String,<:Any}) Creates an IdMappingWorkflow object which stores the configuration of the data processing job to be run. Each IdMappingWorkflow must have a unique workflow name. To modify an existing workflow, use the UpdateIdMappingWorkflow API. # Arguments -- `id_mapping_techniques`: An object which defines the idMappingType and the - providerProperties. +- `id_mapping_techniques`: An object which defines the ID mapping technique and any + additional configurations. - `input_source_config`: A list of InputSource objects, which have the fields InputSourceARN and SchemaName. -- `role_arn`: The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes - this role to create resources on your behalf as part of workflow execution. - `workflow_name`: The name of the workflow. There can't be multiple IdMappingWorkflows with the same name. @@ -138,12 +138,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"description"`: A description of the workflow. - `"outputSourceConfig"`: A list of IdMappingWorkflowOutputSource objects, each of which contains fields OutputS3Path and Output. +- `"roleArn"`: The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes + this role to create resources on your behalf as part of workflow execution. - `"tags"`: The tags used to organize, track, or control access for this resource. """ function create_id_mapping_workflow( idMappingTechniques, inputSourceConfig, - roleArn, workflowName; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -153,7 +154,6 @@ function create_id_mapping_workflow( Dict{String,Any}( "idMappingTechniques" => idMappingTechniques, "inputSourceConfig" => inputSourceConfig, - "roleArn" => roleArn, "workflowName" => workflowName, ); aws_config=aws_config, @@ -163,7 +163,6 @@ end function create_id_mapping_workflow( idMappingTechniques, inputSourceConfig, - roleArn, workflowName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -177,7 +176,6 @@ function create_id_mapping_workflow( Dict{String,Any}( "idMappingTechniques" => idMappingTechniques, "inputSourceConfig" => inputSourceConfig, - "roleArn" => roleArn, "workflowName" => workflowName, ), params, @@ -1131,7 +1129,9 @@ Updates the resource-based policy. # Arguments - `arn`: The Amazon Resource Name (ARN) of the resource for which the policy needs to be updated. -- `policy`: The resource-based policy. +- `policy`: The resource-based policy. If you set the value of the effect parameter in the + policy to Deny for the PutPolicy operation, you must also set the value of the effect + parameter to Deny for the AddPolicyStatement operation. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -1314,20 +1314,18 @@ function untag_resource( end """ - update_id_mapping_workflow(id_mapping_techniques, input_source_config, role_arn, workflow_name) - update_id_mapping_workflow(id_mapping_techniques, input_source_config, role_arn, workflow_name, params::Dict{String,<:Any}) + update_id_mapping_workflow(id_mapping_techniques, input_source_config, workflow_name) + update_id_mapping_workflow(id_mapping_techniques, input_source_config, workflow_name, params::Dict{String,<:Any}) Updates an existing IdMappingWorkflow. This method is identical to CreateIdMappingWorkflow, except it uses an HTTP PUT request instead of a POST request, and the IdMappingWorkflow must already exist for the method to succeed. # Arguments -- `id_mapping_techniques`: An object which defines the idMappingType and the - providerProperties. +- `id_mapping_techniques`: An object which defines the ID mapping technique and any + additional configurations. - `input_source_config`: A list of InputSource objects, which have the fields InputSourceARN and SchemaName. -- `role_arn`: The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes - this role to access Amazon Web Services resources on your behalf. - `workflow_name`: The name of the workflow. # Optional Parameters @@ -1335,11 +1333,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"description"`: A description of the workflow. - `"outputSourceConfig"`: A list of OutputSource objects, each of which contains fields OutputS3Path and KMSArn. +- `"roleArn"`: The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes + this role to access Amazon Web Services resources on your behalf. """ function update_id_mapping_workflow( idMappingTechniques, inputSourceConfig, - roleArn, workflowName; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -1349,7 +1348,6 @@ function update_id_mapping_workflow( Dict{String,Any}( "idMappingTechniques" => idMappingTechniques, "inputSourceConfig" => inputSourceConfig, - "roleArn" => roleArn, ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1358,7 +1356,6 @@ end function update_id_mapping_workflow( idMappingTechniques, inputSourceConfig, - roleArn, workflowName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -1372,7 +1369,6 @@ function update_id_mapping_workflow( Dict{String,Any}( "idMappingTechniques" => idMappingTechniques, "inputSourceConfig" => inputSourceConfig, - "roleArn" => roleArn, ), params, ), diff --git a/src/services/firehose.jl b/src/services/firehose.jl index d50f06233d..81040a0147 100644 --- a/src/services/firehose.jl +++ b/src/services/firehose.jl @@ -77,6 +77,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys only one destination. - `"HttpEndpointDestinationConfiguration"`: Enables configuring Kinesis Firehose to deliver data to any HTTP endpoint destination. You can specify only one destination. +- `"IcebergDestinationConfiguration"`: Configure Apache Iceberg Tables destination. + Amazon Data Firehose is in preview release and is subject to change. - `"KinesisStreamSourceConfiguration"`: When a Kinesis data stream is used as the source for the delivery stream, a KinesisStreamSourceConfiguration containing the Kinesis data stream Amazon Resource Name (ARN) and the role ARN for the source stream. @@ -744,6 +746,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys - `"ExtendedS3DestinationUpdate"`: Describes an update for a destination in Amazon S3. - `"HttpEndpointDestinationUpdate"`: Describes an update to the specified HTTP endpoint destination. +- `"IcebergDestinationUpdate"`: Describes an update for a destination in Apache Iceberg + Tables. Amazon Data Firehose is in preview release and is subject to change. - `"RedshiftDestinationUpdate"`: Describes an update for a destination in Amazon Redshift. - `"S3DestinationUpdate"`: [Deprecated] Describes an update for a destination in Amazon S3. - `"SnowflakeDestinationUpdate"`: Update to the Snowflake destination configuration diff --git a/src/services/glue.jl b/src/services/glue.jl index 381075a80b..7aba9bc4ab 100644 --- a/src/services/glue.jl +++ b/src/services/glue.jl @@ -4194,6 +4194,8 @@ Retrieves all databases defined in a given Data Catalog. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AttributesToGet"`: Specifies the database fields returned by the GetDatabases call. + This parameter doesn’t accept an empty list. The request must include the NAME. - `"CatalogId"`: The ID of the Data Catalog from which to retrieve Databases. If none is provided, the Amazon Web Services account ID is used by default. - `"MaxResults"`: The maximum number of databases to return in one response. diff --git a/src/services/iotsitewise.jl b/src/services/iotsitewise.jl index 69d92c5fbf..81cc8f26d5 100644 --- a/src/services/iotsitewise.jl +++ b/src/services/iotsitewise.jl @@ -559,7 +559,7 @@ reusable component that you can include in the composite models of other asset m can't create assets directly from this type of asset model. # Arguments -- `asset_model_name`: A unique, friendly name for the asset model. +- `asset_model_name`: A unique name for the asset model. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -568,7 +568,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys composite models that model parts of your industrial equipment. Each composite model has a type that defines the properties that the composite model supports. Use composite models to define alarms on this asset model. When creating custom composite models, you need to use - CreateAssetModelCompositeModel. For more information, see <LINK>. + CreateAssetModelCompositeModel. For more information, see Creating custom composite models + (Components) in the IoT SiteWise User Guide. - `"assetModelDescription"`: A description for the asset model. - `"assetModelExternalId"`: An external ID to assign to the asset model. The external ID must be unique within your Amazon Web Services account. For more information, see Using @@ -649,7 +650,7 @@ with assetModelType of COMPONENT_MODEL. To create an inline model, specify the assetModelCompositeModelProperties and don't include an composedAssetModelId. # Arguments -- `asset_model_composite_model_name`: A unique, friendly name for the composite model. +- `asset_model_composite_model_name`: A unique name for the composite model. - `asset_model_composite_model_type`: The composite model type. Valid values are AWS/ALARM, CUSTOM, or AWS/L4E_ANOMALY. - `asset_model_id`: The ID of the asset model this composite model is a part of. @@ -666,12 +667,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys to supply your own ID instead, you can specify it here in UUID format. 
If you specify your own ID, it must be globally unique. - `"assetModelCompositeModelProperties"`: The property definitions of the composite model. - For more information, see <LINK>. You can specify up to 200 properties per composite - model. For more information, see Quotas in the IoT SiteWise User Guide. + For more information, see Inline custom composite models in the IoT SiteWise User Guide. + You can specify up to 200 properties per composite model. For more information, see Quotas + in the IoT SiteWise User Guide. - `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required. -- `"composedAssetModelId"`: The ID of a composite model on this asset. +- `"composedAssetModelId"`: The ID of a component model which is reused to create this + composite model. - `"parentAssetModelCompositeModelId"`: The ID of the parent composite model in this asset model relationship. """ @@ -875,7 +878,7 @@ from local servers to IoT SiteWise. For more information, see Ingesting data usi gateway in the IoT SiteWise User Guide. # Arguments -- `gateway_name`: A unique, friendly name for the gateway. +- `gateway_name`: A unique name for the gateway. - `gateway_platform`: The gateway's platform. You can only specify one platform in a gateway. @@ -3622,19 +3625,19 @@ end Updates an asset model and all of the assets that were created from the model. Each asset created from the model inherits the updated asset model's property and hierarchy definitions. For more information, see Updating assets and models in the IoT SiteWise User -Guide. This operation overwrites the existing model with the provided model. To avoid -deleting your asset model's properties or hierarchies, you must include their IDs and -definitions in the updated asset model payload. For more information, see -DescribeAssetModel. If you remove a property from an asset model, IoT SiteWise deletes all -previous data for that property. If you remove a hierarchy definition from an asset model, -IoT SiteWise disassociates every asset associated with that hierarchy. You can't change the -type or data type of an existing property. +Guide. If you remove a property from an asset model, IoT SiteWise deletes all previous +data for that property. You can’t change the type or data type of an existing property. +To replace an existing asset model property with a new one with the same name, do the +following: Submit an UpdateAssetModel request with the entire existing property removed. + Submit a second UpdateAssetModel request that includes the new property. The new asset +property will have the same name as the previous one and IoT SiteWise will generate a new +unique id. # Arguments - `asset_model_id`: The ID of the asset model to update. This can be either the actual ID in UUID format, or else externalId: followed by the external ID, if it has one. For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide. -- `asset_model_name`: A unique, friendly name for the asset model. +- `asset_model_name`: A unique name for the asset model. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3643,7 +3646,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys composite models that model parts of your industrial equipment. Each composite model has a type that defines the properties that the composite model supports. 
Use composite models to define alarms on this asset model. When creating custom composite models, you need to use - CreateAssetModelCompositeModel. For more information, see <LINK>. + CreateAssetModelCompositeModel. For more information, see Creating custom composite models + (Components) in the IoT SiteWise User Guide. - `"assetModelDescription"`: A description for the asset model. - `"assetModelExternalId"`: An external ID to assign to the asset model. The asset model must not already have an external ID. The external ID must be unique within your Amazon Web @@ -3714,7 +3718,7 @@ previous one and IoT SiteWise will generate a new unique id. # Arguments - `asset_model_composite_model_id`: The ID of a composite model on this asset model. -- `asset_model_composite_model_name`: A unique, friendly name for the composite model. +- `asset_model_composite_model_name`: A unique name for the composite model. - `asset_model_id`: The ID of the asset model, in UUID format. # Optional Parameters @@ -3724,8 +3728,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys can only set the external ID of the asset model if it wasn't set when it was created, or you're setting it to the exact same thing as when it was created. - `"assetModelCompositeModelProperties"`: The property definitions of the composite model. - For more information, see <LINK>. You can specify up to 200 properties per composite - model. For more information, see Quotas in the IoT SiteWise User Guide. + For more information, see Inline custom composite models in the IoT SiteWise User Guide. + You can specify up to 200 properties per composite model. For more information, see Quotas + in the IoT SiteWise User Guide. - `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required. @@ -3906,7 +3911,7 @@ Updates a gateway's name. # Arguments - `gateway_id`: The ID of the gateway to update. -- `gateway_name`: A unique, friendly name for the gateway. +- `gateway_name`: A unique name for the gateway. """ function update_gateway( diff --git a/src/services/ivs_realtime.jl b/src/services/ivs_realtime.jl index 8785c3e1d2..c298347d4e 100644 --- a/src/services/ivs_realtime.jl +++ b/src/services/ivs_realtime.jl @@ -100,8 +100,8 @@ Creates a new stage (and optionally participant tokens). # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"autoParticipantRecordingConfiguration"`: Auto participant recording configuration - object attached to the stage. +- `"autoParticipantRecordingConfiguration"`: Configuration object for individual + participant recording, to attach to the new stage. - `"name"`: Optional name that can be specified for the stage being created. - `"participantTokenConfigurations"`: Array of participant token configuration objects to attach to the new stage. @@ -203,6 +203,38 @@ function delete_encoder_configuration( ) end +""" + delete_public_key(arn) + delete_public_key(arn, params::Dict{String,<:Any}) + +Deletes the specified public key used to sign stage participant tokens. This invalidates +future participant tokens generated using the key pair’s private key. + +# Arguments +- `arn`: ARN of the public key to be deleted. 
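For illustration, a minimal call sketch through the generated wrapper: the ARN is hypothetical, the `@service` loading step follows the usual AWS.jl pattern (the module name is lowercased to locate `services/ivs_realtime.jl`), and credentials are assumed to be available to `global_aws_config()`.

```julia
using AWS
@service IVS_Realtime   # loads the generated ivs_realtime.jl wrapper

# Hypothetical ARN of a previously imported public key.
key_arn = "arn:aws:ivs:us-west-2:123456789012:public-key/abcdEXAMPLE"

# Required-argument form.
IVS_Realtime.delete_public_key(key_arn)

# Equivalent form with an explicit (here empty) params dictionary.
IVS_Realtime.delete_public_key(key_arn, Dict{String,Any}())
```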
+ +""" +function delete_public_key(arn; aws_config::AbstractAWSConfig=global_aws_config()) + return ivs_realtime( + "POST", + "/DeletePublicKey", + Dict{String,Any}("arn" => arn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_public_key( + arn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/DeletePublicKey", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("arn" => arn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_stage(arn) delete_stage(arn, params::Dict{String,<:Any}) @@ -434,6 +466,37 @@ function get_participant( ) end +""" + get_public_key(arn) + get_public_key(arn, params::Dict{String,<:Any}) + +Gets information for the specified public key. + +# Arguments +- `arn`: ARN of the public key for which the information is to be retrieved. + +""" +function get_public_key(arn; aws_config::AbstractAWSConfig=global_aws_config()) + return ivs_realtime( + "POST", + "/GetPublicKey", + Dict{String,Any}("arn" => arn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_public_key( + arn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/GetPublicKey", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("arn" => arn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_stage(arn) get_stage(arn, params::Dict{String,<:Any}) @@ -539,6 +602,52 @@ function get_storage_configuration( ) end +""" + import_public_key(public_key_material) + import_public_key(public_key_material, params::Dict{String,<:Any}) + +Import a public key to be used for signing stage participant tokens. + +# Arguments +- `public_key_material`: The content of the public key to be imported. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"name"`: Name of the public key to be imported. +- `"tags"`: Tags attached to the resource. Array of maps, each of the form string:string + (key:value). See Tagging AWS Resources for details, including restrictions that apply to + tags and \"Tag naming limits and requirements\"; Amazon IVS has no constraints on tags + beyond what is documented there. +""" +function import_public_key( + publicKeyMaterial; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/ImportPublicKey", + Dict{String,Any}("publicKeyMaterial" => publicKeyMaterial); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function import_public_key( + publicKeyMaterial, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ivs_realtime( + "POST", + "/ImportPublicKey", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("publicKeyMaterial" => publicKeyMaterial), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_compositions() list_compositions(params::Dict{String,<:Any}) @@ -725,6 +834,36 @@ function list_participants( ) end +""" + list_public_keys() + list_public_keys(params::Dict{String,<:Any}) + +Gets summary information about all public keys in your account, in the AWS region where the +API request is processed. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: Maximum number of results to return. Default: 50. 
+- `"nextToken"`: The first public key to retrieve. This is used for pagination; see the + nextToken response field. +""" +function list_public_keys(; aws_config::AbstractAWSConfig=global_aws_config()) + return ivs_realtime( + "POST", "/ListPublicKeys"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_public_keys( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/ListPublicKeys", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_stage_sessions(stage_arn) list_stage_sessions(stage_arn, params::Dict{String,<:Any}) @@ -1049,9 +1188,9 @@ Updates a stage’s configuration. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"autoParticipantRecordingConfiguration"`: Auto-participant-recording configuration - object to attach to the stage. Auto-participant-recording configuration cannot be updated - while recording is active. +- `"autoParticipantRecordingConfiguration"`: Configuration object for individual + participant recording, to attach to the stage. Note that this cannot be updated while + recording is active. - `"name"`: Name of the stage to be updated. """ function update_stage(arn; aws_config::AbstractAWSConfig=global_aws_config()) diff --git a/src/services/kinesis_analytics_v2.jl b/src/services/kinesis_analytics_v2.jl index d8f45ba1f9..832d11d6c1 100644 --- a/src/services/kinesis_analytics_v2.jl +++ b/src/services/kinesis_analytics_v2.jl @@ -1012,6 +1012,52 @@ function describe_application( ) end +""" + describe_application_operation(application_name, operation_id) + describe_application_operation(application_name, operation_id, params::Dict{String,<:Any}) + +Returns information about a specific operation performed on a Managed Service for Apache +Flink application + +# Arguments +- `application_name`: +- `operation_id`: + +""" +function describe_application_operation( + ApplicationName, OperationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return kinesis_analytics_v2( + "DescribeApplicationOperation", + Dict{String,Any}( + "ApplicationName" => ApplicationName, "OperationId" => OperationId + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_application_operation( + ApplicationName, + OperationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kinesis_analytics_v2( + "DescribeApplicationOperation", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ApplicationName" => ApplicationName, "OperationId" => OperationId + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_application_snapshot(application_name, snapshot_name) describe_application_snapshot(application_name, snapshot_name, params::Dict{String,<:Any}) @@ -1164,6 +1210,50 @@ function discover_input_schema( ) end +""" + list_application_operations(application_name) + list_application_operations(application_name, params::Dict{String,<:Any}) + +Lists information about operations performed on a Managed Service for Apache Flink +application + +# Arguments +- `application_name`: + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"Limit"`: +- `"NextToken"`: +- `"Operation"`: +- `"OperationStatus"`: +""" +function list_application_operations( + ApplicationName; aws_config::AbstractAWSConfig=global_aws_config() +) + return kinesis_analytics_v2( + "ListApplicationOperations", + Dict{String,Any}("ApplicationName" => ApplicationName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_application_operations( + ApplicationName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kinesis_analytics_v2( + "ListApplicationOperations", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("ApplicationName" => ApplicationName), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_application_snapshots(application_name) list_application_snapshots(application_name, params::Dict{String,<:Any}) @@ -1324,11 +1414,10 @@ end rollback_application(application_name, current_application_version_id, params::Dict{String,<:Any}) Reverts the application to the previous running version. You can roll back an application -if you suspect it is stuck in a transient status. You can roll back an application only if -it is in the UPDATING or AUTOSCALING status. When you rollback an application, it loads -state data from the last successful snapshot. If the application has no snapshots, Managed -Service for Apache Flink rejects the rollback request. This action is not supported for -Managed Service for Apache Flink for SQL applications. +if you suspect it is stuck in a transient status or in the running status. You can roll +back an application only if it is in the UPDATING, AUTOSCALING, or RUNNING statuses. When +you rollback an application, it loads state data from the last successful snapshot. If the +application has no snapshots, Managed Service for Apache Flink rejects the rollback request. # Arguments - `application_name`: The name of the application. diff --git a/src/services/license_manager_linux_subscriptions.jl b/src/services/license_manager_linux_subscriptions.jl index f9c028f39e..7505e96c4f 100644 --- a/src/services/license_manager_linux_subscriptions.jl +++ b/src/services/license_manager_linux_subscriptions.jl @@ -4,11 +4,97 @@ using AWS.AWSServices: license_manager_linux_subscriptions using AWS.Compat using AWS.UUIDs +""" + deregister_subscription_provider(subscription_provider_arn) + deregister_subscription_provider(subscription_provider_arn, params::Dict{String,<:Any}) + +Remove a third-party subscription provider from the Bring Your Own License (BYOL) +subscriptions registered to your account. + +# Arguments +- `subscription_provider_arn`: The Amazon Resource Name (ARN) of the subscription provider + resource to deregister. 
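A rough usage sketch, assuming the usual `@service` loading pattern and a hypothetical subscription-provider ARN obtained earlier from RegisterSubscriptionProvider:

```julia
using AWS
@service License_Manager_Linux_Subscriptions   # lowercased to license_manager_linux_subscriptions.jl

# Hypothetical ARN of a registered BYOL subscription provider.
provider_arn = "arn:aws:license-manager-linux-subscriptions:us-east-1:123456789012:subscription-provider/exampleEXAMPLE"

License_Manager_Linux_Subscriptions.deregister_subscription_provider(provider_arn)
```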
+ +""" +function deregister_subscription_provider( + SubscriptionProviderArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/DeregisterSubscriptionProvider", + Dict{String,Any}("SubscriptionProviderArn" => SubscriptionProviderArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function deregister_subscription_provider( + SubscriptionProviderArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/DeregisterSubscriptionProvider", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("SubscriptionProviderArn" => SubscriptionProviderArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_registered_subscription_provider(subscription_provider_arn) + get_registered_subscription_provider(subscription_provider_arn, params::Dict{String,<:Any}) + +Get details for a Bring Your Own License (BYOL) subscription that's registered to your +account. + +# Arguments +- `subscription_provider_arn`: The Amazon Resource Name (ARN) of the BYOL registration + resource to get details for. + +""" +function get_registered_subscription_provider( + SubscriptionProviderArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/GetRegisteredSubscriptionProvider", + Dict{String,Any}("SubscriptionProviderArn" => SubscriptionProviderArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_registered_subscription_provider( + SubscriptionProviderArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/GetRegisteredSubscriptionProvider", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("SubscriptionProviderArn" => SubscriptionProviderArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_service_settings() get_service_settings(params::Dict{String,<:Any}) -Lists the Linux subscriptions service settings. +Lists the Linux subscriptions service settings for your account. """ function get_service_settings(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -40,15 +126,17 @@ subscriptions. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filters"`: An array of structures that you can use to filter the results to those that - match one or more sets of key-value pairs that you specify. For example, you can filter by - the name of AmiID with an optional operator to see subscriptions that match, partially - match, or don't match a certain Amazon Machine Image (AMI) ID. The valid names for this - filter are: AmiID InstanceID AccountID Status Region UsageOperation - ProductCode InstanceType The valid Operators for this filter are: contains - equals Notequal -- `"MaxResults"`: Maximum number of results to return in a single call. -- `"NextToken"`: Token for the next set of results. +- `"Filters"`: An array of structures that you can use to filter the results by your + specified criteria. For example, you can specify Region in the Name, with the contains + operator to list all subscriptions that match a partial string in the Value, such as + us-west. 
For each filter, you can specify one of the following values for the Name key to + streamline results: AccountID AmiID DualSubscription InstanceID + InstanceType ProductCode Region Status UsageOperation For each filter, + you can use one of the following Operator values to define the behavior of the filter: + contains equals Notequal +- `"MaxResults"`: The maximum items to return in a request. +- `"NextToken"`: A token to specify where to start paginating. This is the nextToken from a + previously truncated response. """ function list_linux_subscription_instances(; aws_config::AbstractAWSConfig=global_aws_config() @@ -88,8 +176,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys partially match, or don't match a certain subscription's name. The valid names for this filter are: Subscription The valid Operators for this filter are: contains equals Notequal -- `"MaxResults"`: Maximum number of results to return in a single call. -- `"NextToken"`: Token for the next set of results. +- `"MaxResults"`: The maximum items to return in a request. +- `"NextToken"`: A token to specify where to start paginating. This is the nextToken from a + previously truncated response. """ function list_linux_subscriptions(; aws_config::AbstractAWSConfig=global_aws_config()) return license_manager_linux_subscriptions( @@ -111,6 +200,208 @@ function list_linux_subscriptions( ) end +""" + list_registered_subscription_providers() + list_registered_subscription_providers(params::Dict{String,<:Any}) + +List Bring Your Own License (BYOL) subscription registration resources for your account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum items to return in a request. +- `"NextToken"`: A token to specify where to start paginating. This is the nextToken from a + previously truncated response. +- `"SubscriptionProviderSources"`: To filter your results, specify which subscription + providers to return in the list. +""" +function list_registered_subscription_providers(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/ListRegisteredSubscriptionProviders"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_registered_subscription_providers( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/ListRegisteredSubscriptionProviders", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +List the metadata tags that are assigned to the specified Amazon Web Services resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource for which to list metadata + tags. 
+ +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return license_manager_linux_subscriptions( + "GET", + "/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return license_manager_linux_subscriptions( + "GET", + "/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + register_subscription_provider(secret_arn, subscription_provider_source) + register_subscription_provider(secret_arn, subscription_provider_source, params::Dict{String,<:Any}) + +Register the supported third-party subscription provider for your Bring Your Own License +(BYOL) subscription. + +# Arguments +- `secret_arn`: The Amazon Resource Name (ARN) of the secret where you've stored your + subscription provider's access token. For RHEL subscriptions managed through the Red Hat + Subscription Manager (RHSM), the secret contains your Red Hat Offline token. +- `subscription_provider_source`: The supported Linux subscription provider to register. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Tags"`: The metadata tags to assign to your registered Linux subscription provider + resource. +""" +function register_subscription_provider( + SecretArn, SubscriptionProviderSource; aws_config::AbstractAWSConfig=global_aws_config() +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/RegisterSubscriptionProvider", + Dict{String,Any}( + "SecretArn" => SecretArn, + "SubscriptionProviderSource" => SubscriptionProviderSource, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function register_subscription_provider( + SecretArn, + SubscriptionProviderSource, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/RegisterSubscriptionProvider", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "SecretArn" => SecretArn, + "SubscriptionProviderSource" => SubscriptionProviderSource, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Add metadata tags to the specified Amazon Web Services resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the Amazon Web Services resource to + which to add the specified metadata tags. +- `tags`: The metadata tags to assign to the Amazon Web Services resource. Tags are + formatted as key value pairs. 
+ +""" +function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return license_manager_linux_subscriptions( + "PUT", + "/tags/$(resourceArn)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceArn, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return license_manager_linux_subscriptions( + "PUT", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Remove one or more metadata tag from the specified Amazon Web Services resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the Amazon Web Services resource to + remove the metadata tags from. +- `tag_keys`: A list of metadata tag keys to remove from the requested resource. + +""" +function untag_resource( + resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return license_manager_linux_subscriptions( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return license_manager_linux_subscriptions( + "DELETE", + "/tags/$(resourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_service_settings(linux_subscriptions_discovery, linux_subscriptions_discovery_settings) update_service_settings(linux_subscriptions_discovery, linux_subscriptions_discovery_settings, params::Dict{String,<:Any}) diff --git a/src/services/mediaconnect.jl b/src/services/mediaconnect.jl index f286958480..074bd5329e 100644 --- a/src/services/mediaconnect.jl +++ b/src/services/mediaconnect.jl @@ -1861,6 +1861,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys represents the minimal potential latency of that connection. The latency of the stream is set to the highest number between the sender’s minimum latency and the receiver’s minimum latency. +- `"outputStatus"`: An indication of whether the output should transmit data or not. If you + don't specify the outputStatus field in your request, MediaConnect leaves the value + unchanged. - `"port"`: The port to use when content is distributed to this output. - `"protocol"`: The protocol to use for the output. - `"remoteId"`: The remote ID for the Zixi-pull stream. diff --git a/src/services/medialive.jl b/src/services/medialive.jl index 6a03c9a9a0..2e3694ab5a 100644 --- a/src/services/medialive.jl +++ b/src/services/medialive.jl @@ -543,6 +543,7 @@ exactly two source URLs for redundancy. Only specify sources for PULL type Inputs. Leave Destinations empty. +- `"srtSettings"`: The settings associated with an SRT input. - `"tags"`: A collection of key-value pairs. - `"type"`: - `"vpc"`: @@ -3294,6 +3295,7 @@ exactly two source URLs for redundancy. Only specify sources for PULL type Inputs. Leave Destinations empty. +- `"srtSettings"`: The settings associated with an SRT input. 
""" function update_input(inputId; aws_config::AbstractAWSConfig=global_aws_config()) return medialive( diff --git a/src/services/medical_imaging.jl b/src/services/medical_imaging.jl index 7467de1fc8..fb35fb86a0 100644 --- a/src/services/medical_imaging.jl +++ b/src/services/medical_imaging.jl @@ -15,6 +15,11 @@ Copy an image set. - `datastore_id`: The data store identifier. - `source_image_set_id`: The source image set identifier. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"force"`: Setting this flag will force the CopyImageSet operation, even if Patient, + Study, or Series level metadata are mismatched across the sourceImageSet and + destinationImageSet. """ function copy_image_set( copyImageSetInformation, @@ -705,6 +710,12 @@ Update image set metadata attributes. - `latest_version`: The latest image set version identifier. - `update_image_set_metadata_updates`: Update image set metadata updates. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"force"`: Setting this flag will force the UpdateImageSetMetadata operation for the + following attributes: Tag.StudyInstanceUID, Tag.SeriesInstanceUID, Tag.SOPInstanceUID, + and Tag.StudyID Adding, removing, or updating private tags for an individual SOP + Instance """ function update_image_set_metadata( datastoreId, diff --git a/src/services/mobile.jl b/src/services/mobile.jl deleted file mode 100644 index d348dc1d08..0000000000 --- a/src/services/mobile.jl +++ /dev/null @@ -1,299 +0,0 @@ -# This file is auto-generated by AWSMetadata.jl -using AWS -using AWS.AWSServices: mobile -using AWS.Compat -using AWS.UUIDs - -""" - create_project() - create_project(params::Dict{String,<:Any}) - - Creates an AWS Mobile Hub project. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"contents"`: ZIP or YAML file which contains configuration settings to be used when - creating the project. This may be the contents of the file downloaded from the URL provided - in an export project operation. -- `"name"`: Name of the project. -- `"region"`: Default region where project resources should be created. -- `"snapshotId"`: Unique identifier for an exported snapshot of project configuration. - This snapshot identifier is included in the share URL when a project is exported. -""" -function create_project(; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile( - "POST", "/projects"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function create_project( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return mobile( - "POST", "/projects", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - delete_project(project_id) - delete_project(project_id, params::Dict{String,<:Any}) - - Delets a project in AWS Mobile Hub. - -# Arguments -- `project_id`: Unique project identifier. 
- -""" -function delete_project(projectId; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile( - "DELETE", - "/projects/$(projectId)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function delete_project( - projectId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return mobile( - "DELETE", - "/projects/$(projectId)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - describe_bundle(bundle_id) - describe_bundle(bundle_id, params::Dict{String,<:Any}) - - Get the bundle details for the requested bundle id. - -# Arguments -- `bundle_id`: Unique bundle identifier. - -""" -function describe_bundle(bundleId; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile( - "GET", - "/bundles/$(bundleId)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function describe_bundle( - bundleId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return mobile( - "GET", - "/bundles/$(bundleId)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - describe_project(project_id) - describe_project(project_id, params::Dict{String,<:Any}) - - Gets details about a project in AWS Mobile Hub. - -# Arguments -- `project_id`: Unique project identifier. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"syncFromResources"`: If set to true, causes AWS Mobile Hub to synchronize information - from other services, e.g., update state of AWS CloudFormation stacks in the AWS Mobile Hub - project. -""" -function describe_project(projectId; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile( - "GET", - "/project", - Dict{String,Any}("projectId" => projectId); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function describe_project( - projectId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return mobile( - "GET", - "/project", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("projectId" => projectId), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - export_bundle(bundle_id) - export_bundle(bundle_id, params::Dict{String,<:Any}) - - Generates customized software development kit (SDK) and or tool packages used to integrate -mobile web or mobile app clients with backend AWS resources. - -# Arguments -- `bundle_id`: Unique bundle identifier. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"platform"`: Developer desktop or target application platform. -- `"projectId"`: Unique project identifier. -""" -function export_bundle(bundleId; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile( - "POST", - "/bundles/$(bundleId)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function export_bundle( - bundleId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return mobile( - "POST", - "/bundles/$(bundleId)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - export_project(project_id) - export_project(project_id, params::Dict{String,<:Any}) - - Exports project configuration to a snapshot which can be downloaded and shared. 
Note that -mobile app push credentials are encrypted in exported projects, so they can only be shared -successfully within the same AWS account. - -# Arguments -- `project_id`: Unique project identifier. - -""" -function export_project(projectId; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile( - "POST", - "/exports/$(projectId)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function export_project( - projectId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return mobile( - "POST", - "/exports/$(projectId)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_bundles() - list_bundles(params::Dict{String,<:Any}) - - List all available bundles. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: Maximum number of records to list in a single response. -- `"nextToken"`: Pagination token. Set to null to start listing bundles from start. If - non-null pagination token is returned in a result, then pass its value in here in another - request to list more bundles. -""" -function list_bundles(; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile("GET", "/bundles"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) -end -function list_bundles( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return mobile( - "GET", "/bundles", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - list_projects() - list_projects(params::Dict{String,<:Any}) - - Lists projects in AWS Mobile Hub. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: Maximum number of records to list in a single response. -- `"nextToken"`: Pagination token. Set to null to start listing projects from start. If - non-null pagination token is returned in a result, then pass its value in here in another - request to list more projects. -""" -function list_projects(; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile( - "GET", "/projects"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function list_projects( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return mobile( - "GET", "/projects", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - update_project(project_id) - update_project(project_id, params::Dict{String,<:Any}) - - Update an existing project. - -# Arguments -- `project_id`: Unique project identifier. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"contents"`: ZIP or YAML file which contains project configuration to be updated. This - should be the contents of the file downloaded from the URL provided in an export project - operation. 
-""" -function update_project(projectId; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile( - "POST", - "/update", - Dict{String,Any}("projectId" => projectId); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function update_project( - projectId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return mobile( - "POST", - "/update", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("projectId" => projectId), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end diff --git a/src/services/mq.jl b/src/services/mq.jl index 69f6b4e24a..8d8ef4ffb5 100644 --- a/src/services/mq.jl +++ b/src/services/mq.jl @@ -5,8 +5,8 @@ using AWS.Compat using AWS.UUIDs """ - create_broker(auto_minor_version_upgrade, broker_name, deployment_mode, engine_type, engine_version, host_instance_type, publicly_accessible, users) - create_broker(auto_minor_version_upgrade, broker_name, deployment_mode, engine_type, engine_version, host_instance_type, publicly_accessible, users, params::Dict{String,<:Any}) + create_broker(broker_name, deployment_mode, engine_type, host_instance_type, publicly_accessible, users) + create_broker(broker_name, deployment_mode, engine_type, host_instance_type, publicly_accessible, users, params::Dict{String,<:Any}) Creates a broker. Note: This API is asynchronous. To create a broker, you must either use the AmazonMQFullAccess IAM policy or include the following EC2 permissions in your IAM @@ -21,10 +21,6 @@ Your Amazon Web Services Credentials and Never Modify or Delete the Amazon MQ El Network Interface in the Amazon MQ Developer Guide. # Arguments -- `auto_minor_version_upgrade`: Enables automatic upgrades to new minor versions for - brokers, as new versions are released and supported by Amazon MQ. Automatic upgrades occur - during the scheduled maintenance window of the broker or after a manual broker reboot. Set - to true by default, if no value is specified. - `broker_name`: Required. The broker's name. This value must be unique in your Amazon Web Services account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, and must not contain white spaces, brackets, wildcard characters, or special @@ -35,8 +31,6 @@ Network Interface in the Amazon MQ Developer Guide. - `deployment_mode`: Required. The broker's deployment mode. - `engine_type`: Required. The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ. -- `engine_version`: Required. The broker engine's version. For a list of supported engine - versions, see Supported engines. - `host_instance_type`: Required. The broker's instance type. - `publicly_accessible`: Enables connections from applications outside of the VPC that hosts the broker's subnets. Set to false by default, if no value is provided. @@ -49,6 +43,11 @@ Network Interface in the Amazon MQ Developer Guide. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"authenticationStrategy"`: Optional. The authentication strategy used to secure the broker. The default is SIMPLE. +- `"autoMinorVersionUpgrade"`: Enables automatic upgrades to new patch versions for brokers + as new versions are released and supported by Amazon MQ. Automatic upgrades occur during + the scheduled maintenance window or after a manual broker reboot. Set to true by default, + if no value is specified. 
Must be set to true for ActiveMQ brokers version 5.18 and above + and for RabbitMQ brokers version 3.13 and above. - `"configuration"`: A list of information about the configuration. - `"creatorRequestId"`: The unique ID that the requester receives for the created broker. Amazon MQ passes your ID with the API action. We recommend using a Universally Unique @@ -59,6 +58,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys that is used to replicate data from in a data replication pair, and is applied to the replica broker. Must be set when dataReplicationMode is set to CRDR. - `"encryptionOptions"`: Encryption options for the broker. +- `"engineVersion"`: The broker engine version. Defaults to the latest available version + for the specified broker engine type. For more information, see the ActiveMQ version + management and the RabbitMQ version management sections in the Amazon MQ Developer Guide. - `"ldapServerMetadata"`: Optional. The metadata of the LDAP server used to authenticate and authorize connections to the broker. Does not apply to RabbitMQ brokers. - `"logs"`: Enables Amazon CloudWatch logging for brokers. @@ -81,11 +83,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"tags"`: Create tags when creating the broker. """ function create_broker( - autoMinorVersionUpgrade, brokerName, deploymentMode, engineType, - engineVersion, hostInstanceType, publiclyAccessible, users; @@ -95,11 +95,9 @@ function create_broker( "POST", "/v1/brokers", Dict{String,Any}( - "autoMinorVersionUpgrade" => autoMinorVersionUpgrade, "brokerName" => brokerName, "deploymentMode" => deploymentMode, "engineType" => engineType, - "engineVersion" => engineVersion, "hostInstanceType" => hostInstanceType, "publiclyAccessible" => publiclyAccessible, "users" => users, @@ -110,11 +108,9 @@ function create_broker( ) end function create_broker( - autoMinorVersionUpgrade, brokerName, deploymentMode, engineType, - engineVersion, hostInstanceType, publiclyAccessible, users, @@ -128,11 +124,9 @@ function create_broker( mergewith( _merge, Dict{String,Any}( - "autoMinorVersionUpgrade" => autoMinorVersionUpgrade, "brokerName" => brokerName, "deploymentMode" => deploymentMode, "engineType" => engineType, - "engineVersion" => engineVersion, "hostInstanceType" => hostInstanceType, "publiclyAccessible" => publiclyAccessible, "users" => users, @@ -147,8 +141,8 @@ function create_broker( end """ - create_configuration(engine_type, engine_version, name) - create_configuration(engine_type, engine_version, name, params::Dict{String,<:Any}) + create_configuration(engine_type, name) + create_configuration(engine_type, name, params::Dict{String,<:Any}) Creates a new configuration for the specified configuration name. Amazon MQ uses the default configuration (the engine type and version). @@ -156,8 +150,6 @@ default configuration (the engine type and version). # Arguments - `engine_type`: Required. The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ. -- `engine_version`: Required. The broker engine's version. For a list of supported engine - versions, see Supported engines. - `name`: Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long. @@ -166,24 +158,24 @@ default configuration (the engine type and version). Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: - `"authenticationStrategy"`: Optional. The authentication strategy associated with the configuration. The default is SIMPLE. +- `"engineVersion"`: The broker engine version. Defaults to the latest available version + for the specified broker engine type. For more information, see the ActiveMQ version + management and the RabbitMQ version management sections in the Amazon MQ Developer Guide. - `"tags"`: Create tags when creating the configuration. """ function create_configuration( - engineType, engineVersion, name; aws_config::AbstractAWSConfig=global_aws_config() + engineType, name; aws_config::AbstractAWSConfig=global_aws_config() ) return mq( "POST", "/v1/configurations", - Dict{String,Any}( - "engineType" => engineType, "engineVersion" => engineVersion, "name" => name - ); + Dict{String,Any}("engineType" => engineType, "name" => name); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function create_configuration( engineType, - engineVersion, name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -193,13 +185,7 @@ function create_configuration( "/v1/configurations", Dict{String,Any}( mergewith( - _merge, - Dict{String,Any}( - "engineType" => engineType, - "engineVersion" => engineVersion, - "name" => name, - ), - params, + _merge, Dict{String,Any}("engineType" => engineType, "name" => name), params ), ); aws_config=aws_config, @@ -861,13 +847,16 @@ Adds a pending configuration change to a broker. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"authenticationStrategy"`: Optional. The authentication strategy used to secure the broker. The default is SIMPLE. -- `"autoMinorVersionUpgrade"`: Enables automatic upgrades to new minor versions for - brokers, as new versions are released and supported by Amazon MQ. Automatic upgrades occur - during the scheduled maintenance window of the broker or after a manual broker reboot. +- `"autoMinorVersionUpgrade"`: Enables automatic upgrades to new patch versions for brokers + as new versions are released and supported by Amazon MQ. Automatic upgrades occur during + the scheduled maintenance window or after a manual broker reboot. Must be set to true for + ActiveMQ brokers version 5.18 and above and for RabbitMQ brokers version 3.13 and above. - `"configuration"`: A list of information about the configuration. - `"dataReplicationMode"`: Defines whether this broker is a part of a data replication pair. -- `"engineVersion"`: The broker engine version. For a list of supported engine versions, - see Supported engines. +- `"engineVersion"`: The broker engine version. For more information, see the ActiveMQ + version management and the RabbitMQ version management sections in the Amazon MQ Developer + Guide. When upgrading to ActiveMQ version 5.18 and above or RabbitMQ version 3.13 and + above, you must have autoMinorVersionUpgrade set to true for the broker. - `"hostInstanceType"`: The broker's host instance type to upgrade to. For a list of supported instance types, see Broker instance types. - `"ldapServerMetadata"`: Optional. The metadata of the LDAP server used to authenticate diff --git a/src/services/opensearch.jl b/src/services/opensearch.jl index c45c383def..282f8ec461 100644 --- a/src/services/opensearch.jl +++ b/src/services/opensearch.jl @@ -297,6 +297,7 @@ managing Amazon OpenSearch Service domains. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"AIMLOptions"`: Options for all machine learning features for the specified domain. - `"AccessPolicies"`: Identity and Access Management (IAM) policy document specifying the access policies for the new domain. - `"AdvancedOptions"`: Key-value pairs to specify advanced configuration options. The @@ -2355,6 +2356,7 @@ Modifies the cluster configuration of the specified Amazon OpenSearch Service do # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AIMLOptions"`: Options for all machine learning features for the specified domain. - `"AccessPolicies"`: Identity and Access Management (IAM) access policy as a JSON-formatted string. - `"AdvancedOptions"`: Key-value pairs to specify advanced configuration options. The diff --git a/src/services/organizations.jl b/src/services/organizations.jl index ffb0df1a7b..83614b04b2 100644 --- a/src/services/organizations.jl +++ b/src/services/organizations.jl @@ -233,24 +233,23 @@ from the organization's management account. For more information about creating see Creating a member account in your organization in the Organizations User Guide. When you create an account in an organization using the Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such -as a payment method and signing the end user license agreement (EULA) is not automatically -collected. If you must remove an account from your organization later, you can do so only -after you provide the missing information. For more information, see Considerations before -removing an account from an organization in the Organizations User Guide. If you get an -exception that indicates that you exceeded your account limits for the organization, -contact Amazon Web Services Support. If you get an exception that indicates that the -operation failed because your organization is still initializing, wait one hour and then -try again. If the error persists, contact Amazon Web Services Support. Using -CreateAccount to create multiple temporary accounts isn't recommended. You can only close -an account from the Billing and Cost Management console, and you must be signed in as the -root user. For information on the requirements and process for closing an account, see -Closing a member account in your organization in the Organizations User Guide. When you -create a member account with this operation, you can choose whether to create the account -with the IAM User and Role Access to Billing Information switch enabled. If you enable it, -IAM users and roles that have appropriate permissions can view billing information for the -account. If you disable it, only the account root user can access billing information. For -information about how to disable this switch for an account, see Granting access to your -billing information and tools. +as a payment method is not automatically collected. If you must remove an account from your +organization later, you can do so only after you provide the missing information. For more +information, see Considerations before removing an account from an organization in the +Organizations User Guide. If you get an exception that indicates that you exceeded your +account limits for the organization, contact Amazon Web Services Support. If you get an +exception that indicates that the operation failed because your organization is still +initializing, wait one hour and then try again. If the error persists, contact Amazon Web +Services Support. 
Using CreateAccount to create multiple temporary accounts isn't +recommended. You can only close an account from the Billing and Cost Management console, +and you must be signed in as the root user. For information on the requirements and process +for closing an account, see Closing a member account in your organization in the +Organizations User Guide. When you create a member account with this operation, you can +choose whether to create the account with the IAM User and Role Access to Billing +Information switch enabled. If you enable it, IAM users and roles that have appropriate +permissions can view billing information for the account. If you disable it, only the +account root user can access billing information. For information about how to disable this +switch for an account, see Granting access to your billing information and tools. # Arguments - `account_name`: The friendly name of the member account. diff --git a/src/services/payment_cryptography_data.jl b/src/services/payment_cryptography_data.jl index cd9714f20c..08697fd465 100644 --- a/src/services/payment_cryptography_data.jl +++ b/src/services/payment_cryptography_data.jl @@ -32,8 +32,14 @@ operations: EncryptData GetPublicCertificate ImportKey - `cipher_text`: The ciphertext to decrypt. - `decryption_attributes`: The encryption key type and attributes for ciphertext decryption. - `key_identifier`: The keyARN of the encryption key that Amazon Web Services Payment - Cryptography uses for ciphertext decryption. + Cryptography uses for ciphertext decryption. When a WrappedKeyBlock is provided, this value + will be the identifier to the key wrapping key. Otherwise, it is the key identifier used to + perform the operation. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"WrappedKey"`: The WrappedKeyBlock containing the encryption key for ciphertext + decryption. """ function decrypt_data( CipherText, @@ -106,12 +112,18 @@ ImportKey ReEncryptData # Arguments - `encryption_attributes`: The encryption key type and attributes for plaintext encryption. - `key_identifier`: The keyARN of the encryption key that Amazon Web Services Payment - Cryptography uses for plaintext encryption. + Cryptography uses for plaintext encryption. When a WrappedKeyBlock is provided, this value + will be the identifier to the key wrapping key. Otherwise, it is the key identifier used to + perform the operation. - `plain_text`: The plaintext to be encrypted. For encryption using asymmetric keys, plaintext data length is constrained by encryption key strength that you define in KeyAlgorithm and padding type that you define in AsymmetricEncryptionAttributes. For more information, see Encrypt data in the Amazon Web Services Payment Cryptography User Guide. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"WrappedKey"`: The WrappedKeyBlock containing the encryption key for plaintext + encryption. """ function encrypt_data( EncryptionAttributes, @@ -396,32 +408,37 @@ end re_encrypt_data(cipher_text, incoming_encryption_attributes, incoming_key_identifier, outgoing_encryption_attributes, outgoing_key_identifier) re_encrypt_data(cipher_text, incoming_encryption_attributes, incoming_key_identifier, outgoing_encryption_attributes, outgoing_key_identifier, params::Dict{String,<:Any}) -Re-encrypt ciphertext using DUKPT, Symmetric and Asymmetric Data Encryption Keys. 
You can -either generate an encryption key within Amazon Web Services Payment Cryptography by -calling CreateKey or import your own encryption key by calling ImportKey. The KeyArn for -use with this operation must be in a compatible key state with KeyModesOfUse set to -Encrypt. In asymmetric encryption, ciphertext is encrypted using public component (imported -by calling ImportKey) of the asymmetric key pair created outside of Amazon Web Services -Payment Cryptography. For symmetric and DUKPT encryption, Amazon Web Services Payment -Cryptography supports TDES and AES algorithms. For asymmetric encryption, Amazon Web -Services Payment Cryptography supports RSA. To encrypt using DUKPT, a DUKPT key must -already exist within your account with KeyModesOfUse set to DeriveKey or a new DUKPT can be -generated by calling CreateKey. For information about valid keys for this operation, see -Understanding key attributes and Key types for specific data operations in the Amazon Web -Services Payment Cryptography User Guide. Cross-account use: This operation can't be used -across different Amazon Web Services accounts. Related operations: DecryptData -EncryptData GetPublicCertificate ImportKey +Re-encrypt ciphertext using DUKPT or Symmetric data encryption keys. You can either +generate an encryption key within Amazon Web Services Payment Cryptography by calling +CreateKey or import your own encryption key by calling ImportKey. The KeyArn for use with +this operation must be in a compatible key state with KeyModesOfUse set to Encrypt. For +symmetric and DUKPT encryption, Amazon Web Services Payment Cryptography supports TDES and +AES algorithms. To encrypt using DUKPT, a DUKPT key must already exist within your account +with KeyModesOfUse set to DeriveKey or a new DUKPT can be generated by calling CreateKey. +For information about valid keys for this operation, see Understanding key attributes and +Key types for specific data operations in the Amazon Web Services Payment Cryptography User +Guide. Cross-account use: This operation can't be used across different Amazon Web +Services accounts. Related operations: DecryptData EncryptData +GetPublicCertificate ImportKey # Arguments - `cipher_text`: Ciphertext to be encrypted. The minimum allowed length is 16 bytes and maximum allowed length is 4096 bytes. - `incoming_encryption_attributes`: The attributes and values for incoming ciphertext. - `incoming_key_identifier`: The keyARN of the encryption key of incoming ciphertext data. + When a WrappedKeyBlock is provided, this value will be the identifier to the key wrapping + key. Otherwise, it is the key identifier used to perform the operation. - `outgoing_encryption_attributes`: The attributes and values for outgoing ciphertext data after encryption by Amazon Web Services Payment Cryptography. - `outgoing_key_identifier`: The keyARN of the encryption key of outgoing ciphertext data after encryption by Amazon Web Services Payment Cryptography. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"IncomingWrappedKey"`: The WrappedKeyBlock containing the encryption key of incoming + ciphertext data. +- `"OutgoingWrappedKey"`: The WrappedKeyBlock containing the encryption key of outgoing + ciphertext data after encryption by Amazon Web Services Payment Cryptography. 
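A minimal usage sketch (illustrative only, not part of the generated docstring), assuming the usual `@service` loading pattern; the key ARNs, ciphertext, and attribute shapes below are placeholders, and the `IncomingWrappedKey`/`OutgoingWrappedKey` entries would go in `params` when the identifiers point at key wrapping keys:

    using AWS
    @service Payment_Cryptography_Data

    # Placeholder ciphertext, ARNs, and attribute maps; real shapes follow the
    # encryption/decryption attribute structures in the Payment Cryptography API.
    cipher_text = "<base64 ciphertext from a previous EncryptData call>"
    incoming_attrs = Dict("Symmetric" => Dict("Mode" => "CBC"))
    outgoing_attrs = Dict("Symmetric" => Dict("Mode" => "CBC"))

    Payment_Cryptography_Data.re_encrypt_data(
        cipher_text,
        incoming_attrs,
        "arn:aws:payment-cryptography:...:key/incoming-key-id",
        outgoing_attrs,
        "arn:aws:payment-cryptography:...:key/outgoing-key-id",
    )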
""" function re_encrypt_data( CipherText, @@ -500,7 +517,9 @@ operations: GeneratePinData VerifyPinData - `encrypted_pin_block`: The encrypted PIN block data that Amazon Web Services Payment Cryptography translates. - `incoming_key_identifier`: The keyARN of the encryption key under which incoming PIN - block data is encrypted. This key type can be PEK or BDK. + block data is encrypted. This key type can be PEK or BDK. When a WrappedKeyBlock is + provided, this value will be the identifier to the key wrapping key for PIN block. + Otherwise, it is the key identifier used to perform the operation. - `incoming_translation_attributes`: The format of the incoming PIN block data for translation within Amazon Web Services Payment Cryptography. - `outgoing_key_identifier`: The keyARN of the encryption key for encrypting outgoing PIN @@ -512,8 +531,12 @@ operations: GeneratePinData VerifyPinData Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"IncomingDukptAttributes"`: The attributes and values to use for incoming DUKPT encryption key for PIN block translation. +- `"IncomingWrappedKey"`: The WrappedKeyBlock containing the encryption key under which + incoming PIN block data is encrypted. - `"OutgoingDukptAttributes"`: The attributes and values to use for outgoing DUKPT encryption key after PIN block translation. +- `"OutgoingWrappedKey"`: The WrappedKeyBlock containing the encryption key for encrypting + outgoing PIN block data. """ function translate_pin_data( EncryptedPinBlock, diff --git a/src/services/pi.jl b/src/services/pi.jl index 8d1ac9c6e8..5941314693 100644 --- a/src/services/pi.jl +++ b/src/services/pi.jl @@ -173,7 +173,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys \"AdditionalMetrics\" : { \"string\" : \"string\" }. - `"Filter"`: One or more filters to apply in the request. Restrictions: Any number of filters by the same dimension, as specified in the GroupBy or Partition parameters. A - single filter for any other dimension in this dimension group. + single filter for any other dimension in this dimension group. The db.sql.db_id filter + isn't available for RDS for SQL Server DB instances. - `"MaxResults"`: The maximum number of items to return in the response. If more items exist than the specified MaxRecords value, a pagination token is included in the response so that the remaining results can be retrieved. diff --git a/src/services/pinpoint_sms_voice_v2.jl b/src/services/pinpoint_sms_voice_v2.jl index f9c6893414..0ce86979f2 100644 --- a/src/services/pinpoint_sms_voice_v2.jl +++ b/src/services/pinpoint_sms_voice_v2.jl @@ -184,12 +184,12 @@ end create_event_destination(configuration_set_name, event_destination_name, matching_event_types, params::Dict{String,<:Any}) Creates a new event destination in a configuration set. An event destination is a location -where you send message events. The event options are Amazon CloudWatch, Amazon Kinesis Data +where you send message events. The event options are Amazon CloudWatch, Amazon Data Firehose, or Amazon SNS. For example, when a message is delivered successfully, you can send information about that event to an event destination, or send notifications to endpoints that are subscribed to an Amazon SNS topic. Each configuration set can contain between 0 and 5 event destinations. Each event destination can contain a reference to a -single destination, such as a CloudWatch or Kinesis Data Firehose destination. 
+single destination, such as a CloudWatch or Firehose destination. # Arguments - `configuration_set_name`: Either the name of the configuration set or the configuration @@ -197,8 +197,8 @@ single destination, such as a CloudWatch or Kinesis Data Firehose destination. found using the DescribeConfigurationSets action. - `event_destination_name`: The name that identifies the event destination. - `matching_event_types`: An array of event types that determine which events to log. If - \"ALL\" is used, then Amazon Pinpoint logs every event type. The TEXT_SENT event type is - not supported. + \"ALL\" is used, then AWS End User Messaging SMS and Voice logs every event type. The + TEXT_SENT event type is not supported. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -208,7 +208,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"CloudWatchLogsDestination"`: An object that contains information about an event destination for logging to Amazon CloudWatch Logs. - `"KinesisFirehoseDestination"`: An object that contains information about an event - destination for logging to Amazon Kinesis Data Firehose. + destination for logging to Amazon Data Firehose. - `"SnsDestination"`: An object that contains information about an event destination for logging to Amazon SNS. """ @@ -265,7 +265,7 @@ An opt-out list is a list of phone numbers that are opted out, meaning you can't or voice messages to them. If end user replies with the keyword \"STOP,\" an entry for the phone number is added to the opt-out list. In addition to STOP, your recipients can use any supported opt-out keyword, such as CANCEL or OPTOUT. For a list of supported opt-out -keywords, see SMS opt out in the Amazon Pinpoint User Guide. +keywords, see SMS opt out in the AWS End User Messaging SMS User Guide. # Arguments - `opt_out_list_name`: The name of the new OptOutList. @@ -328,11 +328,12 @@ be associated with multiple pools. country or region of the new pool. - `message_type`: The type of message. Valid values are TRANSACTIONAL for messages that are critical or time-sensitive and PROMOTIONAL for messages that aren't critical or - time-sensitive. + time-sensitive. After the pool is created the MessageType can't be changed. - `origination_identity`: The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the - values for SenderId and SenderIdArn. + values for SenderId and SenderIdArn. After the pool is created you can add more origination + identities to the pool by using AssociateOriginationIdentity. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -871,8 +872,8 @@ Deletes an existing keyword from an origination phone number or pool. A keyword that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins -with a keyword, Amazon Pinpoint responds with a customizable message. Keywords \"HELP\" and -\"STOP\" can't be deleted or modified. +with a keyword, AWS End User Messaging SMS and Voice responds with a customizable message. +Keywords \"HELP\" and \"STOP\" can't be deleted or modified. 
# Arguments - `keyword`: The keyword to delete. @@ -1230,8 +1231,8 @@ end Deletes an account-level monthly spending limit override for sending text messages. Deleting a spend limit override will set the EnforcedLimit to equal the MaxLimit, which is -controlled by Amazon Web Services. For more information on spend limits (quotas) see Amazon -Pinpoint quotas in the Amazon Pinpoint Developer Guide. +controlled by Amazon Web Services. For more information on spend limits (quotas) see Quotas + in the AWS End User Messaging SMS User Guide. """ function delete_text_message_spend_limit_override(; @@ -1302,8 +1303,8 @@ end Deletes an account level monthly spend limit override for sending voice messages. Deleting a spend limit override sets the EnforcedLimit equal to the MaxLimit, which is controlled by -Amazon Web Services. For more information on spending limits (quotas) see Amazon Pinpoint -quotas in the Amazon Pinpoint Developer Guide. +Amazon Web Services. For more information on spending limits (quotas) see Quotas in the +AWS End User Messaging SMS User Guide. """ function delete_voice_message_spend_limit_override(; @@ -1333,9 +1334,9 @@ end Describes attributes of your Amazon Web Services account. The supported account attributes include account tier, which indicates whether your account is in the sandbox or production environment. When you're ready to move your account out of the sandbox, create an Amazon -Web Services Support case for a service limit increase request. New Amazon Pinpoint -accounts are placed into an SMS or voice sandbox. The sandbox protects both Amazon Web -Services end recipients and SMS or voice recipients from fraud and abuse. +Web Services Support case for a service limit increase request. New accounts are placed +into an SMS or voice sandbox. The sandbox protects both Amazon Web Services end recipients +and SMS or voice recipients from fraud and abuse. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1363,12 +1364,12 @@ end describe_account_limits() describe_account_limits(params::Dict{String,<:Any}) -Describes the current Amazon Pinpoint SMS Voice V2 resource quotas for your account. The -description for a quota includes the quota name, current usage toward that quota, and the -quota's maximum value. When you establish an Amazon Web Services account, the account has -initial quotas on the maximum number of configuration sets, opt-out lists, phone numbers, -and pools that you can create in a given Region. For more information see Amazon Pinpoint -quotas in the Amazon Pinpoint Developer Guide. +Describes the current AWS End User Messaging SMS and Voice SMS Voice V2 resource quotas for +your account. The description for a quota includes the quota name, current usage toward +that quota, and the quota's maximum value. When you establish an Amazon Web Services +account, the account has initial quotas on the maximum number of configuration sets, +opt-out lists, phone numbers, and pools that you can create in a given Region. For more +information see Quotas in the AWS End User Messaging SMS User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1436,8 +1437,8 @@ Describes the specified keywords or all keywords on your origination phone numbe A keyword is a word that you can search for on a particular phone number or pool. 
It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a -message that begins with a keyword, Amazon Pinpoint responds with a customizable message. -If you specify a keyword that isn't valid, an error is returned. +message that begins with a keyword, AWS End User Messaging SMS and Voice responds with a +customizable message. If you specify a keyword that isn't valid, an error is returned. # Arguments - `origination_identity`: The origination identity to use such as a PhoneNumberId, @@ -1987,11 +1988,11 @@ end describe_spend_limits() describe_spend_limits(params::Dict{String,<:Any}) -Describes the current Amazon Pinpoint monthly spend limits for sending voice and text -messages. When you establish an Amazon Web Services account, the account has initial -monthly spend limit in a given Region. For more information on increasing your monthly -spend limit, see Requesting increases to your monthly SMS spending quota for Amazon -Pinpoint in the Amazon Pinpoint User Guide. +Describes the current monthly spend limits for sending voice and text messages. When you +establish an Amazon Web Services account, the account has initial monthly spend limit in a +given Region. For more information on increasing your monthly spend limit, see Requesting +increases to your monthly SMS, MMS, or Voice spending quota in the AWS End User Messaging +SMS User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2377,8 +2378,8 @@ Creates or updates a keyword configuration on an origination phone number or poo keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message -that begins with a keyword, Amazon Pinpoint responds with a customizable message. If you -specify a keyword that isn't valid, an error is returned. +that begins with a keyword, AWS End User Messaging SMS and Voice responds with a +customizable message. If you specify a keyword that isn't valid, an error is returned. # Arguments - `keyword`: The new keyword to add. @@ -2618,7 +2619,7 @@ end request_phone_number(iso_country_code, message_type, number_capabilities, number_type, params::Dict{String,<:Any}) Request an origination phone number for use in your account. For more information on phone -number request see Requesting a number in the Amazon Pinpoint User Guide. +number request see Request a phone number in the AWS End User Messaging SMS User Guide. # Arguments - `iso_country_code`: The two-character code, in ISO 3166-1 alpha-2 format, for the country @@ -2892,11 +2893,12 @@ end send_text_message(destination_phone_number) send_text_message(destination_phone_number, params::Dict{String,<:Any}) -Creates a new text message and sends it to a recipient's phone number. SMS throughput -limits are measured in Message Parts per Second (MPS). Your MPS limit depends on the -destination country of your messages, as well as the type of phone number (origination -number) that you use to send the message. For more information, see Message Parts per -Second (MPS) limits in the Amazon Pinpoint User Guide. +Creates a new text message and sends it to a recipient's phone number. SendTextMessage only +sends an SMS message to one recipient each time it is invoked. 
SMS throughput limits are +measured in Message Parts per Second (MPS). Your MPS limit depends on the destination +country of your messages, as well as the type of phone number (origination number) that you +use to send the message. For more information about MPS, see Message Parts per Second (MPS) +limits in the AWS End User Messaging SMS User Guide. # Arguments - `destination_phone_number`: The destination phone number in E.164 format. @@ -2910,20 +2912,31 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DestinationCountryParameters"`: This field is used for any country-specific registration requirements. Currently, this setting is only used when you send messages to recipients in India using a sender ID. For more information see Special requirements for - sending SMS messages to recipients in India. + sending SMS messages to recipients in India. IN_ENTITY_ID The entity ID or Principal + Entity (PE) ID that you received after completing the sender ID registration process. + IN_TEMPLATE_ID The template ID that you received after completing the sender ID + registration process. Make sure that the Template ID that you specify matches your message + template exactly. If your message doesn't match the template that you provided during the + registration process, the mobile carriers might reject your message. - `"DryRun"`: When set to true, the message is checked and validated, but isn't sent to the - end recipient. + end recipient. You are not charged for using DryRun. The Message Parts per Second (MPS) + limit when using DryRun is five. If your origination identity has a lower MPS limit then + the lower MPS limit is used. For more information about MPS limits, see Message Parts per + Second (MPS) limits in the AWS End User Messaging SMS User Guide.. - `"Keyword"`: When you register a short code in the US, you must specify a program name. If you don’t have a US short code, omit this attribute. - `"MaxPrice"`: The maximum amount that you want to spend, in US dollars, per each text - message part. A text message can contain multiple parts. + message. If the calculated amount to send the text message is greater than MaxPrice, the + message is not sent and an error is returned. - `"MessageBody"`: The body of the text message. - `"MessageType"`: The type of message. Valid values are for messages that are critical or time-sensitive and PROMOTIONAL for messages that aren't critical or time-sensitive. - `"OriginationIdentity"`: The origination identity of the message. This can be either the PhoneNumber, PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn. - `"ProtectConfigurationId"`: The unique identifier for the protect configuration. -- `"TimeToLive"`: How long the text message is valid for. By default this is 72 hours. +- `"TimeToLive"`: How long the text message is valid for, in seconds. By default this is 72 + hours. If the messages isn't handed off before the TTL expires we stop attempting to hand + off the message and return TTL_EXPIRED event. """ function send_text_message( DestinationPhoneNumber; aws_config::AbstractAWSConfig=global_aws_config() @@ -2958,8 +2971,8 @@ end send_voice_message(destination_phone_number, origination_identity) send_voice_message(destination_phone_number, origination_identity, params::Dict{String,<:Any}) -Allows you to send a request that sends a voice message through Amazon Pinpoint. This -operation uses Amazon Polly to convert a text script into a voice message. 
+Allows you to send a request that sends a voice message. This operation uses Amazon Polly +to convert a text script into a voice message. # Arguments - `destination_phone_number`: The destination phone number in E.164 format. @@ -3320,11 +3333,11 @@ end tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) -Adds or overwrites only the specified tags for the specified Amazon Pinpoint SMS Voice, -version 2 resource. When you specify an existing tag key, the value is overwritten with the -new value. Each resource can have a maximum of 50 tags. Each tag consists of a key and an -optional value. Tag keys must be unique per resource. For more information about tags, see -Tagging Amazon Pinpoint resources in the Amazon Pinpoint Developer Guide. +Adds or overwrites only the specified tags for the specified resource. When you specify an +existing tag key, the value is overwritten with the new value. Each resource can have a +maximum of 50 tags. Each tag consists of a key and an optional value. Tag keys must be +unique per resource. For more information about tags, see Tags in the AWS End User +Messaging SMS User Guide. # Arguments - `resource_arn`: The Amazon Resource Name (ARN) of the resource. @@ -3363,9 +3376,8 @@ end untag_resource(resource_arn, tag_keys) untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) -Removes the association of the specified tags from an Amazon Pinpoint SMS Voice V2 -resource. For more information on tags see Tagging Amazon Pinpoint resources in the Amazon -Pinpoint Developer Guide. +Removes the association of the specified tags from a resource. For more information on tags +see Tags in the AWS End User Messaging SMS User Guide. # Arguments - `resource_arn`: The Amazon Resource Name (ARN) of the resource. @@ -3407,10 +3419,10 @@ end update_event_destination(configuration_set_name, event_destination_name, params::Dict{String,<:Any}) Updates an existing event destination in a configuration set. You can update the IAM role -ARN for CloudWatch Logs and Kinesis Data Firehose. You can also enable or disable the event -destination. You may want to update an event destination to change its matching event types -or updating the destination resource ARN. You can't change an event destination's type -between CloudWatch Logs, Kinesis Data Firehose, and Amazon SNS. +ARN for CloudWatch Logs and Firehose. You can also enable or disable the event destination. +You may want to update an event destination to change its matching event types or updating +the destination resource ARN. You can't change an event destination's type between +CloudWatch Logs, Firehose, and Amazon SNS. # Arguments - `configuration_set_name`: The configuration set to update with the new event destination. @@ -3423,7 +3435,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys destination that sends data to CloudWatch Logs. - `"Enabled"`: When set to true logging is enabled. - `"KinesisFirehoseDestination"`: An object that contains information about an event - destination for logging to Kinesis Data Firehose. + destination for logging to Firehose. - `"MatchingEventTypes"`: An array of event types that determine which events to log. The TEXT_SENT event type is not supported. - `"SnsDestination"`: An object that contains information about an event destination that @@ -3487,10 +3499,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"OptOutListName"`: The OptOutList to add the phone number to. 
Valid values for this field can be either the OutOutListName or OutOutListArn. - `"SelfManagedOptOutsEnabled"`: By default this is set to false. When an end recipient - sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon - Pinpoint automatically replies with a customizable message and adds the end recipient to - the OptOutList. When set to true you're responsible for responding to HELP and STOP - requests. You're also responsible for tracking and honoring opt-out requests. + sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End + User Messaging SMS and Voice automatically replies with a customizable message and adds the + end recipient to the OptOutList. When set to true you're responsible for responding to HELP + and STOP requests. You're also responsible for tracking and honoring opt-out requests. - `"TwoWayChannelArn"`: The Amazon Resource Name (ARN) of the two way channel. - `"TwoWayChannelRole"`: An optional IAM Role Arn for a service to assume, to be able to post inbound SMS messages. @@ -3540,10 +3552,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"OptOutListName"`: The OptOutList to associate with the pool. Valid values are either OptOutListName or OptOutListArn. - `"SelfManagedOptOutsEnabled"`: By default this is set to false. When an end recipient - sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon - Pinpoint automatically replies with a customizable message and adds the end recipient to - the OptOutList. When set to true you're responsible for responding to HELP and STOP - requests. You're also responsible for tracking and honoring opt-out requests. + sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End + User Messaging SMS and Voice automatically replies with a customizable message and adds the + end recipient to the OptOutList. When set to true you're responsible for responding to HELP + and STOP requests. You're also responsible for tracking and honoring opt-out requests. - `"SharedRoutesEnabled"`: Indicates whether shared routes are enabled for the pool. - `"TwoWayChannelArn"`: The Amazon Resource Name (ARN) of the two way channel. - `"TwoWayChannelRole"`: An optional IAM Role Arn for a service to assume, to be able to @@ -3625,7 +3637,7 @@ only applied to the specified NumberCapability type. - `country_rule_set_updates`: A map of ProtectConfigurationCountryRuleSetInformation objects that contain the details for the requested NumberCapability. The Key is the two-letter ISO country code. For a list of supported ISO country codes, see Supported - countries and regions (SMS channel) in the Amazon Pinpoint SMS user guide. + countries and regions (SMS channel) in the AWS End User Messaging SMS User Guide. - `number_capability`: The number capability to apply the CountryRuleSetUpdates updates to. - `protect_configuration_id`: The unique identifier for the protect configuration. 
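As an editorial aside to the SendTextMessage changes above, a minimal sketch of the call, assuming the usual `@service` loading pattern; the destination number and pool ID are placeholders, and `DryRun` validates the request without delivering or billing it (its MPS limit is five, per the docstring above):

    using AWS
    @service Pinpoint_SMS_Voice_V2

    Pinpoint_SMS_Voice_V2.send_text_message(
        "+12065550100",                                        # destination, E.164 (placeholder)
        Dict(
            "OriginationIdentity" => "pool-0123456789abcdef",  # placeholder pool ID
            "MessageBody"         => "Your code is 123456",
            "MessageType"         => "TRANSACTIONAL",
            "TimeToLive"          => 3600,                     # seconds
            "DryRun"              => true,                     # validate only, nothing is sent
        ),
    )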
diff --git a/src/services/qapps.jl b/src/services/qapps.jl new file mode 100644 index 0000000000..978be8f087 --- /dev/null +++ b/src/services/qapps.jl @@ -0,0 +1,1189 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: qapps +using AWS.Compat +using AWS.UUIDs + +""" + associate_library_item_review(instance-id, library_item_id) + associate_library_item_review(instance-id, library_item_id, params::Dict{String,<:Any}) + +Associates a rating or review for a library item with the user submitting the request. This +increments the rating count for the specified library item. + +# Arguments +- `instance-id`: The unique identifier for the Amazon Q Business application environment + instance. +- `library_item_id`: The unique identifier of the library item to associate the review with. + +""" +function associate_library_item_review( + instance_id, libraryItemId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/catalog.associateItemRating", + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function associate_library_item_review( + instance_id, + libraryItemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.associateItemRating", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + associate_qapp_with_user(app_id, instance-id) + associate_qapp_with_user(app_id, instance-id, params::Dict{String,<:Any}) + +This operation creates a link between the user's identity calling the operation and a +specific Q App. This is useful to mark the Q App as a favorite for the user if the user +doesn't own the Amazon Q App so they can still run it and see it in their inventory of Q +Apps. + +# Arguments +- `app_id`: The ID of the Amazon Q App to associate with the user. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +""" +function associate_qapp_with_user( + appId, instance_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/apps.install", + Dict{String,Any}( + "appId" => appId, "headers" => Dict{String,Any}("instance-id" => instance_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function associate_qapp_with_user( + appId, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.install", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_library_item(app_id, app_version, categories, instance-id) + create_library_item(app_id, app_version, categories, instance-id, params::Dict{String,<:Any}) + +Creates a new library item for an Amazon Q App, allowing it to be discovered and used by +other allowed users. + +# Arguments +- `app_id`: The unique identifier of the Amazon Q App to publish to the library. +- `app_version`: The version of the Amazon Q App to publish to the library. 
+- `categories`: The categories to associate with the library item for easier discovery. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +""" +function create_library_item( + appId, + appVersion, + categories, + instance_id; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.createItem", + Dict{String,Any}( + "appId" => appId, + "appVersion" => appVersion, + "categories" => categories, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_library_item( + appId, + appVersion, + categories, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.createItem", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "appVersion" => appVersion, + "categories" => categories, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_qapp(app_definition, instance-id, title) + create_qapp(app_definition, instance-id, title, params::Dict{String,<:Any}) + +Creates a new Amazon Q App based on the provided definition. The Q App definition specifies +the cards and flow of the Q App. This operation also calculates the dependencies between +the cards by inspecting the references in the prompts. + +# Arguments +- `app_definition`: The definition of the new Q App, specifying the cards and flow. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `title`: The title of the new Q App. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the new Q App. +- `"tags"`: Optional tags to associate with the new Q App. +""" +function create_qapp( + appDefinition, instance_id, title; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/apps.create", + Dict{String,Any}( + "appDefinition" => appDefinition, + "title" => title, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_qapp( + appDefinition, + instance_id, + title, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.create", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appDefinition" => appDefinition, + "title" => title, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_library_item(instance-id, library_item_id) + delete_library_item(instance-id, library_item_id, params::Dict{String,<:Any}) + +Deletes a library item for an Amazon Q App, removing it from the library so it can no +longer be discovered or used by other users. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `library_item_id`: The unique identifier of the library item to delete. 
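An illustrative call (editorial sketch; the IDs are placeholders and the usual `@service` loading pattern is assumed):

    using AWS
    @service QApps

    # The instance ID is sent as the "instance-id" request header by the generated wrapper.
    QApps.delete_library_item("instance-example-id", "library-item-example-id")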
+ +""" +function delete_library_item( + instance_id, libraryItemId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/catalog.deleteItem", + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_library_item( + instance_id, + libraryItemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.deleteItem", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_qapp(app_id, instance-id) + delete_qapp(app_id, instance-id, params::Dict{String,<:Any}) + +Deletes an Amazon Q App owned by the user. If the Q App was previously published to the +library, it is also removed from the library. + +# Arguments +- `app_id`: The unique identifier of the Q App to delete. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +""" +function delete_qapp(appId, instance_id; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "POST", + "/apps.delete", + Dict{String,Any}( + "appId" => appId, "headers" => Dict{String,Any}("instance-id" => instance_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_qapp( + appId, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.delete", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + disassociate_library_item_review(instance-id, library_item_id) + disassociate_library_item_review(instance-id, library_item_id, params::Dict{String,<:Any}) + +Removes a rating or review previously submitted by the user for a library item. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `library_item_id`: The unique identifier of the library item to remove the review from. + +""" +function disassociate_library_item_review( + instance_id, libraryItemId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/catalog.disassociateItemRating", + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_library_item_review( + instance_id, + libraryItemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.disassociateItemRating", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + disassociate_qapp_from_user(app_id, instance-id) + disassociate_qapp_from_user(app_id, instance-id, params::Dict{String,<:Any}) + +Disassociates a Q App from a user removing the user's access to run the Q App. 
+ +# Arguments +- `app_id`: The unique identifier of the Q App to disassociate from the user. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +""" +function disassociate_qapp_from_user( + appId, instance_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/apps.uninstall", + Dict{String,Any}( + "appId" => appId, "headers" => Dict{String,Any}("instance-id" => instance_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_qapp_from_user( + appId, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.uninstall", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_library_item(instance-id, library_item_id) + get_library_item(instance-id, library_item_id, params::Dict{String,<:Any}) + +Retrieves details about a library item for an Amazon Q App, including its metadata, +categories, ratings, and usage statistics. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `library_item_id`: The unique identifier of the library item to retrieve. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"appId"`: The unique identifier of the Amazon Q App associated with the library item. +""" +function get_library_item( + instance_id, libraryItemId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "GET", + "/catalog.getItem", + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_library_item( + instance_id, + libraryItemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "GET", + "/catalog.getItem", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_qapp(app_id, instance-id) + get_qapp(app_id, instance-id, params::Dict{String,<:Any}) + +Retrieves the full details of an Q App, including its definition specifying the cards and +flow. + +# Arguments +- `app_id`: The unique identifier of the Q App to retrieve. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. 
+ +""" +function get_qapp(appId, instance_id; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "GET", + "/apps.get", + Dict{String,Any}( + "appId" => appId, "headers" => Dict{String,Any}("instance-id" => instance_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_qapp( + appId, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "GET", + "/apps.get", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_qapp_session(instance-id, session_id) + get_qapp_session(instance-id, session_id, params::Dict{String,<:Any}) + +Retrieves the current state and results for an active session of an Amazon Q App. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `session_id`: The unique identifier of the Q App session to retrieve. + +""" +function get_qapp_session( + instance_id, sessionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "GET", + "/runtime.getQAppSession", + Dict{String,Any}( + "sessionId" => sessionId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_qapp_session( + instance_id, + sessionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "GET", + "/runtime.getQAppSession", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "sessionId" => sessionId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + import_document(app_id, card_id, file_contents_base64, file_name, instance-id, scope) + import_document(app_id, card_id, file_contents_base64, file_name, instance-id, scope, params::Dict{String,<:Any}) + +Uploads a file that can then be used either as a default in a FileUploadCard from Q App +definition or as a file that is used inside a single Q App run. The purpose of the document +is determined by a scope parameter that indicates whether it is at the app definition level +or at the app session level. + +# Arguments +- `app_id`: The unique identifier of the Q App the file is associated with. +- `card_id`: The unique identifier of the card the file is associated with, if applicable. +- `file_contents_base64`: The base64-encoded contents of the file to upload. +- `file_name`: The name of the file being uploaded. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `scope`: Whether the file is associated with an Q App definition or a specific Q App + session. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"sessionId"`: The unique identifier of the Q App session the file is associated with, if + applicable. 
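A sketch of uploading a session-scoped file (editorial only; the IDs and the scope string are placeholders, with the valid scope values defined by the document scope type in the API reference):

    using AWS
    using Base64: base64encode
    @service QApps

    contents = base64encode(read("report.csv"))   # the caller supplies base64-encoded bytes
    QApps.import_document(
        "app-example-id",
        "file-upload-card-example-id",
        contents,
        "report.csv",
        "instance-example-id",
        "SESSION",                                # placeholder scope value
        Dict("sessionId" => "session-example-id"),
    )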
+""" +function import_document( + appId, + cardId, + fileContentsBase64, + fileName, + instance_id, + scope; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.importDocument", + Dict{String,Any}( + "appId" => appId, + "cardId" => cardId, + "fileContentsBase64" => fileContentsBase64, + "fileName" => fileName, + "scope" => scope, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function import_document( + appId, + cardId, + fileContentsBase64, + fileName, + instance_id, + scope, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.importDocument", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "cardId" => cardId, + "fileContentsBase64" => fileContentsBase64, + "fileName" => fileName, + "scope" => scope, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_library_items(instance-id) + list_library_items(instance-id, params::Dict{String,<:Any}) + +Lists the library items for Amazon Q Apps that are published and available for users in +your Amazon Web Services account. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"categoryId"`: Optional category to filter the library items by. +- `"limit"`: The maximum number of library items to return in the response. +- `"nextToken"`: The token to request the next page of results. +""" +function list_library_items(instance_id; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "GET", + "/catalog.list", + Dict{String,Any}("headers" => Dict{String,Any}("instance-id" => instance_id)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_library_items( + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "GET", + "/catalog.list", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("instance-id" => instance_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_qapps(instance-id) + list_qapps(instance-id, params::Dict{String,<:Any}) + +Lists the Amazon Q Apps owned by or associated with the user either because they created it +or because they used it from the library in the past. The user identity is extracted from +the credentials used to invoke this operation.. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"limit"`: The maximum number of Q Apps to return in the response. +- `"nextToken"`: The token to request the next page of results. 
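A paging sketch (editorial only; it assumes the default feature set returns the parsed JSON body and that the response carries `apps` and `nextToken` fields, as in the ListQApps output shape):

    using AWS
    @service QApps

    apps = Any[]
    params = Dict{String,Any}("limit" => 50)
    while true
        resp = QApps.list_qapps("instance-example-id", params)
        append!(apps, get(resp, "apps", Any[]))
        token = get(resp, "nextToken", nothing)
        isnothing(token) && break
        params["nextToken"] = token
    end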
+""" +function list_qapps(instance_id; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "GET", + "/apps.list", + Dict{String,Any}("headers" => Dict{String,Any}("instance-id" => instance_id)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_qapps( + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "GET", + "/apps.list", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("instance-id" => instance_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Lists the tags associated with an Amazon Q Apps resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource whose tags should be + listed. + +""" +function list_tags_for_resource( + resourceARN; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "GET", + "/tags/$(resourceARN)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceARN, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "GET", + "/tags/$(resourceARN)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + predict_qapp(instance-id) + predict_qapp(instance-id, params::Dict{String,<:Any}) + +Generates an Amazon Q App definition based on either a conversation or a problem statement +provided as input.The resulting app definition can be used to call CreateQApp. This API +doesn't create Amazon Q Apps directly. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"options"`: The input to generate the Q App definition from, either a conversation or + problem statement. +""" +function predict_qapp(instance_id; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "POST", + "/apps.predictQApp", + Dict{String,Any}("headers" => Dict{String,Any}("instance-id" => instance_id)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function predict_qapp( + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.predictQApp", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("instance-id" => instance_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_qapp_session(app_id, app_version, instance-id) + start_qapp_session(app_id, app_version, instance-id, params::Dict{String,<:Any}) + +Starts a new session for an Amazon Q App, allowing inputs to be provided and the app to be +run. Each Q App session will be condensed into a single conversation in the web +experience. + +# Arguments +- `app_id`: The unique identifier of the Q App to start a session for. +- `app_version`: The version of the Q App to use for the session. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"initialValues"`: Optional initial input values to provide for the Q App session. +- `"tags"`: Optional tags to associate with the new Q App session. +""" +function start_qapp_session( + appId, appVersion, instance_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/runtime.startQAppSession", + Dict{String,Any}( + "appId" => appId, + "appVersion" => appVersion, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_qapp_session( + appId, + appVersion, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/runtime.startQAppSession", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "appVersion" => appVersion, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + stop_qapp_session(instance-id, session_id) + stop_qapp_session(instance-id, session_id, params::Dict{String,<:Any}) + +Stops an active session for an Amazon Q App.This deletes all data related to the session +and makes it invalid for future uses. The results of the session will be persisted as part +of the conversation. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `session_id`: The unique identifier of the Q App session to stop. + +""" +function stop_qapp_session( + instance_id, sessionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/runtime.deleteMiniAppRun", + Dict{String,Any}( + "sessionId" => sessionId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_qapp_session( + instance_id, + sessionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/runtime.deleteMiniAppRun", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "sessionId" => sessionId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Associates tags with an Amazon Q Apps resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource to tag. +- `tags`: The tags to associate with the resource. + +""" +function tag_resource(resourceARN, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "POST", + "/tags/$(resourceARN)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceARN, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/tags/$(resourceARN)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Disassociates tags from an Amazon Q Apps resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource to disassociate the tag + from. 
+- `tag_keys`: The keys of the tags to disassociate from the resource. + +""" +function untag_resource( + resourceARN, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "DELETE", + "/tags/$(resourceARN)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceARN, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "DELETE", + "/tags/$(resourceARN)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_library_item(instance-id, library_item_id) + update_library_item(instance-id, library_item_id, params::Dict{String,<:Any}) + +Updates the metadata and status of a library item for an Amazon Q App. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `library_item_id`: The unique identifier of the library item to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"categories"`: The new categories to associate with the library item. +- `"status"`: The new status to set for the library item, such as \"Published\" or + \"Hidden\". +""" +function update_library_item( + instance_id, libraryItemId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/catalog.updateItem", + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_library_item( + instance_id, + libraryItemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.updateItem", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_qapp(app_id, instance-id) + update_qapp(app_id, instance-id, params::Dict{String,<:Any}) + +Updates an existing Amazon Q App, allowing modifications to its title, description, and +definition. + +# Arguments +- `app_id`: The unique identifier of the Q App to update. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"appDefinition"`: The new definition specifying the cards and flow for the Q App. +- `"description"`: The new description for the Q App. +- `"title"`: The new title for the Q App. 
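As a quick usage sketch for the `update_qapp` helper defined below (the app and instance identifiers are placeholders, and the `@service` module name is an assumption; the `"title"` and `"description"` keys are the ones documented above):

```julia
using AWS
@service QApps   # module name assumed; wraps src/services/qapps.jl

# Rename an existing Q App and refresh its description.
# "app-1234" and "instance-5678" are placeholder identifiers.
QApps.update_qapp(
    "app-1234",
    "instance-5678",
    Dict("title" => "Ticket triage helper", "description" => "Routes incoming tickets"),
)
```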
+""" +function update_qapp(appId, instance_id; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "POST", + "/apps.update", + Dict{String,Any}( + "appId" => appId, "headers" => Dict{String,Any}("instance-id" => instance_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_qapp( + appId, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.update", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_qapp_session(instance-id, session_id) + update_qapp_session(instance-id, session_id, params::Dict{String,<:Any}) + +Updates the session for a given Q App sessionId. This is only valid when at least one card +of the session is in the WAITING state. Data for each WAITING card can be provided as +input. If inputs are not provided, the call will be accepted but session will not move +forward. Inputs for cards that are not in the WAITING status will be ignored. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `session_id`: The unique identifier of the Q App session to provide input for. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"values"`: The input values to provide for the current state of the Q App session. +""" +function update_qapp_session( + instance_id, sessionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/runtime.updateQAppSession", + Dict{String,Any}( + "sessionId" => sessionId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_qapp_session( + instance_id, + sessionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/runtime.updateQAppSession", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "sessionId" => sessionId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/qbusiness.jl b/src/services/qbusiness.jl index 9fb58ab1af..4ef143385a 100644 --- a/src/services/qbusiness.jl +++ b/src/services/qbusiness.jl @@ -191,6 +191,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"identityCenterInstanceArn"`: The Amazon Resource Name (ARN) of the IAM Identity Center instance you are either creating for—or connecting to—your Amazon Q Business application. +- `"personalizationConfiguration"`: Configuration information about chat response + personalization. For more information, see Personalizing chat responses - `"qAppsConfiguration"`: An option to allow end users to create and use Amazon Q Apps in the web experience. - `"roleArn"`: The Amazon Resource Name (ARN) of an IAM role with permissions to access @@ -2140,6 +2142,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"identityCenterInstanceArn"`: The Amazon Resource Name (ARN) of the IAM Identity Center instance you are either creating for—or connecting to—your Amazon Q Business application. 
+- `"personalizationConfiguration"`: Configuration information about chat response + personalization. For more information, see Personalizing chat responses. - `"qAppsConfiguration"`: An option to allow end users to create and use Amazon Q Apps in the web experience. - `"roleArn"`: An Amazon Web Services Identity and Access Management (IAM) role that gives diff --git a/src/services/qconnect.jl b/src/services/qconnect.jl index 29c813e3b2..1680362d5f 100644 --- a/src/services/qconnect.jl +++ b/src/services/qconnect.jl @@ -195,6 +195,80 @@ function create_content( ) end +""" + create_content_association(association, association_type, content_id, knowledge_base_id) + create_content_association(association, association_type, content_id, knowledge_base_id, params::Dict{String,<:Any}) + +Creates an association between a content resource in a knowledge base and step-by-step +guides. Step-by-step guides offer instructions to agents for resolving common customer +issues. You create a content association to integrate Amazon Q in Connect and step-by-step +guides. After you integrate Amazon Q and step-by-step guides, when Amazon Q provides a +recommendation to an agent based on the intent that it's detected, it also provides them +with the option to start the step-by-step guide that you have associated with the content. +Note the following limitations: You can create only one content association for each +content resource in a knowledge base. You can associate a step-by-step guide with +multiple content resources. For more information, see Integrate Amazon Q in Connect with +step-by-step guides in the Amazon Connect Administrator Guide. + +# Arguments +- `association`: The identifier of the associated resource. +- `association_type`: The type of association. +- `content_id`: The identifier of the content. +- `knowledge_base_id`: The identifier of the knowledge base. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. +- `"tags"`: The tags used to organize, track, or control access for this resource. 
+""" +function create_content_association( + association, + associationType, + contentId, + knowledgeBaseId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations", + Dict{String,Any}( + "association" => association, + "associationType" => associationType, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_content_association( + association, + associationType, + contentId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "association" => association, + "associationType" => associationType, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_knowledge_base(knowledge_base_type, name) create_knowledge_base(knowledge_base_type, name, params::Dict{String,<:Any}) @@ -502,6 +576,50 @@ function delete_content( ) end +""" + delete_content_association(content_association_id, content_id, knowledge_base_id) + delete_content_association(content_association_id, content_id, knowledge_base_id, params::Dict{String,<:Any}) + +Deletes the content association. For more information about content associations--what +they are and when they are used--see Integrate Amazon Q in Connect with step-by-step guides +in the Amazon Connect Administrator Guide. + +# Arguments +- `content_association_id`: The identifier of the content association. Can be either the ID + or the ARN. URLs cannot contain the ARN. +- `content_id`: The identifier of the content. +- `knowledge_base_id`: The identifier of the knowledge base. + +""" +function delete_content_association( + contentAssociationId, + contentId, + knowledgeBaseId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "DELETE", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations/$(contentAssociationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_content_association( + contentAssociationId, + contentId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "DELETE", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations/$(contentAssociationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_import_job(import_job_id, knowledge_base_id) delete_import_job(import_job_id, knowledge_base_id, params::Dict{String,<:Any}) @@ -725,6 +843,50 @@ function get_content( ) end +""" + get_content_association(content_association_id, content_id, knowledge_base_id) + get_content_association(content_association_id, content_id, knowledge_base_id, params::Dict{String,<:Any}) + +Returns the content association. For more information about content associations--what they +are and when they are used--see Integrate Amazon Q in Connect with step-by-step guides in +the Amazon Connect Administrator Guide. + +# Arguments +- `content_association_id`: The identifier of the content association. Can be either the ID + or the ARN. URLs cannot contain the ARN. +- `content_id`: The identifier of the content. +- `knowledge_base_id`: The identifier of the knowledge base. 
+ +""" +function get_content_association( + contentAssociationId, + contentId, + knowledgeBaseId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations/$(contentAssociationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_content_association( + contentAssociationId, + contentId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations/$(contentAssociationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_content_summary(content_id, knowledge_base_id) get_content_summary(content_id, knowledge_base_id, params::Dict{String,<:Any}) @@ -1026,6 +1188,49 @@ function list_assistants( ) end +""" + list_content_associations(content_id, knowledge_base_id) + list_content_associations(content_id, knowledge_base_id, params::Dict{String,<:Any}) + +Lists the content associations. For more information about content associations--what they +are and when they are used--see Integrate Amazon Q in Connect with step-by-step guides in +the Amazon Connect Administrator Guide. + +# Arguments +- `content_id`: The identifier of the content. +- `knowledge_base_id`: The identifier of the knowledge base. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +""" +function list_content_associations( + contentId, knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_content_associations( + contentId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_contents(knowledge_base_id) list_contents(knowledge_base_id, params::Dict{String,<:Any}) diff --git a/src/services/quicksight.jl b/src/services/quicksight.jl index 6b911be5f0..bfc5e371a1 100644 --- a/src/services/quicksight.jl +++ b/src/services/quicksight.jl @@ -4,6 +4,88 @@ using AWS.AWSServices: quicksight using AWS.Compat using AWS.UUIDs +""" + batch_create_topic_reviewed_answer(answers, aws_account_id, topic_id) + batch_create_topic_reviewed_answer(answers, aws_account_id, topic_id, params::Dict{String,<:Any}) + +Creates new reviewed answers for a Q Topic. + +# Arguments +- `answers`: The definition of the Answers to be created. +- `aws_account_id`: The ID of the Amazon Web Services account that you want to create a + reviewed answer in. +- `topic_id`: The ID for the topic reviewed answer that you want to create. This ID is + unique per Amazon Web Services Region for each Amazon Web Services account. 
+ +""" +function batch_create_topic_reviewed_answer( + Answers, AwsAccountId, TopicId; aws_config::AbstractAWSConfig=global_aws_config() +) + return quicksight( + "POST", + "/accounts/$(AwsAccountId)/topics/$(TopicId)/batch-create-reviewed-answers", + Dict{String,Any}("Answers" => Answers); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_create_topic_reviewed_answer( + Answers, + AwsAccountId, + TopicId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "POST", + "/accounts/$(AwsAccountId)/topics/$(TopicId)/batch-create-reviewed-answers", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Answers" => Answers), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + batch_delete_topic_reviewed_answer(aws_account_id, topic_id) + batch_delete_topic_reviewed_answer(aws_account_id, topic_id, params::Dict{String,<:Any}) + +Deletes reviewed answers for Q Topic. + +# Arguments +- `aws_account_id`: The ID of the Amazon Web Services account that you want to delete a + reviewed answers in. +- `topic_id`: The ID for the topic reviewed answer that you want to delete. This ID is + unique per Amazon Web Services Region for each Amazon Web Services account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AnswerIds"`: The Answer IDs of the Answers to be deleted. +""" +function batch_delete_topic_reviewed_answer( + AwsAccountId, TopicId; aws_config::AbstractAWSConfig=global_aws_config() +) + return quicksight( + "POST", + "/accounts/$(AwsAccountId)/topics/$(TopicId)/batch-delete-reviewed-answers"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_delete_topic_reviewed_answer( + AwsAccountId, + TopicId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "POST", + "/accounts/$(AwsAccountId)/topics/$(TopicId)/batch-delete-reviewed-answers", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ cancel_ingestion(aws_account_id, data_set_id, ingestion_id) cancel_ingestion(aws_account_id, data_set_id, ingestion_id, params::Dict{String,<:Any}) @@ -5643,6 +5725,44 @@ function list_topic_refresh_schedules( ) end +""" + list_topic_reviewed_answers(aws_account_id, topic_id) + list_topic_reviewed_answers(aws_account_id, topic_id, params::Dict{String,<:Any}) + +Lists all reviewed answers for a Q Topic. + +# Arguments +- `aws_account_id`: The ID of the Amazon Web Services account that containd the reviewed + answers that you want listed. +- `topic_id`: The ID for the topic that contains the reviewed answer that you want to list. + This ID is unique per Amazon Web Services Region for each Amazon Web Services account. 
+ +""" +function list_topic_reviewed_answers( + AwsAccountId, TopicId; aws_config::AbstractAWSConfig=global_aws_config() +) + return quicksight( + "GET", + "/accounts/$(AwsAccountId)/topics/$(TopicId)/reviewed-answers"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_topic_reviewed_answers( + AwsAccountId, + TopicId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "GET", + "/accounts/$(AwsAccountId)/topics/$(TopicId)/reviewed-answers", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_topics(aws_account_id) list_topics(aws_account_id, params::Dict{String,<:Any}) diff --git a/src/services/rds.jl b/src/services/rds.jl index aaddc12b25..b7a040f69b 100644 --- a/src/services/rds.jl +++ b/src/services/rds.jl @@ -1146,7 +1146,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DBSubnetGroupName"`: A DB subnet group to associate with this DB cluster. This setting is required to create a Multi-AZ DB cluster. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must match the name of an existing DB subnet group. - Must not be default. Example: mydbsubnetgroup + Example: mydbsubnetgroup - `"DBSystemId"`: Reserved for future use. - `"DatabaseName"`: The name for your database of up to 64 alphanumeric characters. A database named postgres is always created. If this parameter is specified, an additional @@ -1365,22 +1365,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Tue | Wed | Thu | Fri | Sat | Sun. Must be in Universal Coordinated Time (UTC). Must be at least 30 minutes. - `"PubliclyAccessible"`: Specifies whether the DB cluster is publicly accessible. When the - DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the - private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to - the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is - ultimately controlled by the security group it uses. That public access isn't permitted if - the security group assigned to the DB cluster doesn't permit it. When the DB cluster isn't - publicly accessible, it is an internal DB cluster with a DNS name that resolves to a - private IP address. Valid for Cluster Type: Multi-AZ DB clusters only Default: The default - behavior varies depending on whether DBSubnetGroupName is specified. If DBSubnetGroupName - isn't specified, and PubliclyAccessible isn't specified, the following applies: If the - default VPC in the target Region doesn’t have an internet gateway attached to it, the DB - cluster is private. If the default VPC in the target Region has an internet gateway - attached to it, the DB cluster is public. If DBSubnetGroupName is specified, and - PubliclyAccessible isn't specified, the following applies: If the subnets are part of a - VPC that doesn’t have an internet gateway attached to it, the DB cluster is private. If - the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster - is public. + DB cluster is publicly accessible and you connect from outside of the DB cluster's virtual + private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP + address. When you connect from within the same VPC as the DB cluster, the endpoint resolves + to the private IP address. 
Access to the DB cluster is ultimately controlled by the + security group it uses. That public access isn't permitted if the security group assigned + to the DB cluster doesn't permit it. When the DB cluster isn't publicly accessible, it is + an internal DB cluster with a DNS name that resolves to a private IP address. Valid for + Cluster Type: Multi-AZ DB clusters only Default: The default behavior varies depending on + whether DBSubnetGroupName is specified. If DBSubnetGroupName isn't specified, and + PubliclyAccessible isn't specified, the following applies: If the default VPC in the + target Region doesn’t have an internet gateway attached to it, the DB cluster is private. + If the default VPC in the target Region has an internet gateway attached to it, the DB + cluster is public. If DBSubnetGroupName is specified, and PubliclyAccessible isn't + specified, the following applies: If the subnets are part of a VPC that doesn’t have an + internet gateway attached to it, the DB cluster is private. If the subnets are part of a + VPC that has an internet gateway attached to it, the DB cluster is public. - `"RdsCustomClusterConfiguration"`: Reserved for future use. - `"ReplicationSourceIdentifier"`: The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a read replica. Valid for Cluster Type: @@ -2049,21 +2049,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide. This setting doesn't apply to RDS Custom DB instances. Default: 1 Valid Values: 0 - 15 - `"PubliclyAccessible"`: Specifies whether the DB instance is publicly accessible. When - the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to - the private IP address from within the DB instance's virtual private cloud (VPC). It - resolves to the public IP address from outside of the DB instance's VPC. Access to the DB - instance is ultimately controlled by the security group it uses. That public access is not - permitted if the security group assigned to the DB instance doesn't permit it. When the DB - instance isn't publicly accessible, it is an internal DB instance with a DNS name that - resolves to a private IP address. Default: The default behavior varies depending on whether - DBSubnetGroupName is specified. If DBSubnetGroupName isn't specified, and - PubliclyAccessible isn't specified, the following applies: If the default VPC in the - target Region doesn’t have an internet gateway attached to it, the DB instance is - private. If the default VPC in the target Region has an internet gateway attached to it, - the DB instance is public. If DBSubnetGroupName is specified, and PubliclyAccessible - isn't specified, the following applies: If the subnets are part of a VPC that doesn’t - have an internet gateway attached to it, the DB instance is private. If the subnets are - part of a VPC that has an internet gateway attached to it, the DB instance is public. + the DB instance is publicly accessible and you connect from outside of the DB instance's + virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public + IP address. When you connect from within the same VPC as the DB instance, the endpoint + resolves to the private IP address. Access to the DB instance is ultimately controlled by + the security group it uses. That public access is not permitted if the security group + assigned to the DB instance doesn't permit it. 
When the DB instance isn't publicly + accessible, it is an internal DB instance with a DNS name that resolves to a private IP + address. Default: The default behavior varies depending on whether DBSubnetGroupName is + specified. If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, + the following applies: If the default VPC in the target Region doesn’t have an internet + gateway attached to it, the DB instance is private. If the default VPC in the target + Region has an internet gateway attached to it, the DB instance is public. If + DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following + applies: If the subnets are part of a VPC that doesn’t have an internet gateway + attached to it, the DB instance is private. If the subnets are part of a VPC that has an + internet gateway attached to it, the DB instance is public. - `"StorageEncrypted"`: Specifes whether the DB instance is encrypted. By default, it isn't encrypted. For RDS Custom DB instances, either enable this setting or leave it unset. Otherwise, Amazon RDS reports an error. This setting doesn't apply to Amazon Aurora DB @@ -3430,7 +3431,9 @@ clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"DeleteAutomatedBackups"`: Specifies whether to remove automated backups immediately after the DB cluster is deleted. This parameter isn't case-sensitive. The default is to - remove automated backups immediately after the DB cluster is deleted. + remove automated backups immediately after the DB cluster is deleted. You must delete + automated backups for Amazon RDS Multi-AZ DB clusters. For more information about managing + automated backups for RDS Multi-AZ DB clusters, see Managing automated backups. - `"FinalDBSnapshotIdentifier"`: The DB cluster snapshot identifier of the new DB cluster snapshot created when SkipFinalSnapshot is disabled. Specifying this parameter and also skipping the creation of a final DB cluster snapshot with the SkipFinalShapshot parameter @@ -4650,8 +4653,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100. -- `"Source"`: A specific source to return parameters for. Valid Values: customer - engine service +- `"Source"`: A specific source to return parameters for. Valid Values: user engine + service """ function describe_dbcluster_parameters( DBClusterParameterGroupName; aws_config::AbstractAWSConfig=global_aws_config() @@ -6204,7 +6207,11 @@ end describe_pending_maintenance_actions(params::Dict{String,<:Any}) Returns a list of resources (for example, DB instances) that have at least one pending -maintenance action. +maintenance action. This API follows an eventual consistency model. This means that the +result of the DescribePendingMaintenanceActions command might not be immediately visible to +all subsequent RDS commands. Keep this in mind when you use +DescribePendingMaintenanceActions immediately after using a previous API command such as +ApplyPendingMaintenanceActions. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -7872,16 +7879,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide. This setting doesn't apply to RDS Custom DB instances. Default: 1 Valid Values: 0 - 15 - `"PubliclyAccessible"`: Specifies whether the DB instance is publicly accessible. When - the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to - the private IP address from within the DB cluster's virtual private cloud (VPC). It - resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB - cluster is ultimately controlled by the security group it uses. That public access isn't - permitted if the security group assigned to the DB cluster doesn't permit it. When the DB - instance isn't publicly accessible, it is an internal DB instance with a DNS name that - resolves to a private IP address. PubliclyAccessible only applies to DB instances in a - VPC. The DB instance must be part of a public subnet and PubliclyAccessible must be enabled - for it to be publicly accessible. Changes to the PubliclyAccessible parameter are applied - immediately regardless of the value of the ApplyImmediately parameter. + the DB instance is publicly accessible and you connect from outside of the DB instance's + virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public + IP address. When you connect from within the same VPC as the DB instance, the endpoint + resolves to the private IP address. Access to the DB instance is ultimately controlled by + the security group it uses. That public access isn't permitted if the security group + assigned to the DB instance doesn't permit it. When the DB instance isn't publicly + accessible, it is an internal DB instance with a DNS name that resolves to a private IP + address. PubliclyAccessible only applies to DB instances in a VPC. The DB instance must be + part of a public subnet and PubliclyAccessible must be enabled for it to be publicly + accessible. Changes to the PubliclyAccessible parameter are applied immediately regardless + of the value of the ApplyImmediately parameter. - `"ReplicaMode"`: A value that sets the open mode of a replica database to either mounted or read-only. Currently, this parameter is only supported for Oracle DB instances. Mounted DB replicas are included in Oracle Enterprise Edition. The main use case for @@ -11051,11 +11059,10 @@ end Starts an export of DB snapshot or DB cluster data to Amazon S3. The provided IAM role must have access to the S3 bucket. You can't export snapshot data from Db2 or RDS Custom DB -instances. You can't export cluster data from Multi-AZ DB clusters. For more information on -exporting DB snapshot data, see Exporting DB snapshot data to Amazon S3 in the Amazon RDS -User Guide or Exporting DB cluster snapshot data to Amazon S3 in the Amazon Aurora User -Guide. For more information on exporting DB cluster data, see Exporting DB cluster data to -Amazon S3 in the Amazon Aurora User Guide. +instances. For more information on exporting DB snapshot data, see Exporting DB snapshot +data to Amazon S3 in the Amazon RDS User Guide or Exporting DB cluster snapshot data to +Amazon S3 in the Amazon Aurora User Guide. For more information on exporting DB cluster +data, see Exporting DB cluster data to Amazon S3 in the Amazon Aurora User Guide. # Arguments - `export_task_identifier`: A unique identifier for the export task. 
This ID isn't an diff --git a/src/services/redshift_serverless.jl b/src/services/redshift_serverless.jl index a322e899d4..0fb80814d1 100644 --- a/src/services/redshift_serverless.jl +++ b/src/services/redshift_serverless.jl @@ -245,8 +245,8 @@ operation. action. This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots. (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, - see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Cluster - Management Guide + see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Management + Guide - `schedule`: The schedule for a one-time (at timestamp format) or recurring (cron format) scheduled action. Schedule invocations must be separated by at least one hour. Times are in UTC. Format of at timestamp is yyyy-mm-ddThh:mm:ss. For example, 2016-03-04T17:27:00. @@ -501,6 +501,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"enhancedVpcRouting"`: The value that specifies whether to turn on enhanced virtual private cloud (VPC) routing, which forces Amazon Redshift Serverless to route traffic through your VPC instead of over the internet. +- `"ipAddressType"`: The IP address type that the workgroup supports. Possible values are + ipv4 and dualstack. - `"maxCapacity"`: The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. The max capacity is specified in RPUs. - `"port"`: The custom port to use when connecting to a workgroup. Valid port ranges are @@ -2201,8 +2203,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys action. This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, - see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Cluster - Management Guide + see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Management + Guide - `"schedule"`: The schedule for a one-time (at timestamp format) or recurring (cron format) scheduled action. Schedule invocations must be separated by at least one hour. Times are in UTC. Format of at timestamp is yyyy-mm-ddThh:mm:ss. For example, @@ -2389,6 +2391,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"enhancedVpcRouting"`: The value that specifies whether to turn on enhanced virtual private cloud (VPC) routing, which forces Amazon Redshift Serverless to route traffic through your VPC. +- `"ipAddressType"`: The IP address type that the workgroup supports. Possible values are + ipv4 and dualstack. - `"maxCapacity"`: The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. The max capacity is specified in RPUs. - `"port"`: The custom port to use when connecting to a workgroup. Valid port ranges are diff --git a/src/services/rekognition.jl b/src/services/rekognition.jl index ea1220c2fc..cc561675d6 100644 --- a/src/services/rekognition.jl +++ b/src/services/rekognition.jl @@ -349,6 +349,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys existing dataset or specify the Amazon S3 bucket location of an Amazon Sagemaker format manifest file. If you don't specify datasetSource, an empty dataset is created. 
To add labeled images to the dataset, You can use the console or call UpdateDatasetEntries. +- `"Tags"`: A set of tags (key-value pairs) that you want to attach to the dataset. """ function create_dataset( DatasetType, ProjectArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -441,6 +442,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys for Content Moderation. Applicable only to adapters. - `"Feature"`: Specifies feature that is being customized. If no value is provided CUSTOM_LABELS is used as a default. +- `"Tags"`: A set of tags (key-value pairs) that you want to attach to the project. """ function create_project(ProjectName; aws_config::AbstractAWSConfig=global_aws_config()) return rekognition( @@ -2148,7 +2150,9 @@ in the sample seen below. Use MaxResults parameter to limit the number of label If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetlabelDetection and populate the NextToken request -parameter with the token value returned from the previous call to GetLabelDetection. +parameter with the token value returned from the previous call to GetLabelDetection. If you +are retrieving results while using the Amazon Simple Notification Service, note that you +will receive an \"ERROR\" notification if the job encounters an issue. # Arguments - `job_id`: Job identifier for the label detection operation for which you want results diff --git a/src/services/s3.jl b/src/services/s3.jl index d9db5a2659..41a5ef3d1d 100644 --- a/src/services/s3.jl +++ b/src/services/s3.jl @@ -301,30 +301,29 @@ Amazon Web Services Identity and Access Management (IAM) identity-based policies Express One Zone in the Amazon S3 User Guide. Response and special errors When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length. You always need to -read the entire response body to check if the copy succeeds. to keep the connection alive -while we copy the data. If the copy is successful, you receive a response with -information about the copied object. A copy request might return an error when Amazon S3 -receives the copy request or while Amazon S3 is copying the files. A 200 OK response can -contain either a success or an error. If the error occurs before the copy action starts, -you receive a standard Amazon S3 error. If the error occurs during the copy operation, -the error response is embedded in the 200 OK response. For example, in a cross-region copy, -you may encounter throttling and receive a 200 OK response. For more information, see -Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK status code -means the copy was accepted, but it doesn't mean the copy is complete. Another example is -when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the -copy and you may receive a 200 OK response. You must stay connected to Amazon S3 until the -entire response is successfully received and processed. If you call this API operation -directly, make sure to design your application to parse the content of the response and -handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. 
-The SDKs detect the embedded error and apply error handling per your configuration settings -(including automatically retrying the request as appropriate). If the condition persists, -the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an -error). Charge The copy request charge is based on the storage class and Region that -you specify for the destination object. The request can also result in a data retrieval -charge for the source if the source storage class bills for data retrieval. If the copy -source is in a different region, the data transfer is billed to the copy source account. -For pricing information, see Amazon S3 pricing. HTTP Host header syntax Directory -buckets - The HTTP Host header syntax is +read the entire response body to check if the copy succeeds. If the copy is successful, +you receive a response with information about the copied object. A copy request might +return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the +files. A 200 OK response can contain either a success or an error. If the error occurs +before the copy action starts, you receive a standard Amazon S3 error. If the error +occurs during the copy operation, the error response is embedded in the 200 OK response. +For example, in a cross-region copy, you may encounter throttling and receive a 200 OK +response. For more information, see Resolve the Error 200 response when copying objects to +Amazon S3. The 200 OK status code means the copy was accepted, but it doesn't mean the copy +is complete. Another example is when you disconnect from Amazon S3 before the copy is +complete, Amazon S3 might cancel the copy and you may receive a 200 OK response. You must +stay connected to Amazon S3 until the entire response is successfully received and +processed. If you call this API operation directly, make sure to design your application to +parse the content of the response and handle it appropriately. If you use Amazon Web +Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply +error handling per your configuration settings (including automatically retrying the +request as appropriate). If the condition persists, the SDKs throw an exception (or, for +the SDKs that don't use exceptions, they return an error). Charge The copy request +charge is based on the storage class and Region that you specify for the destination +object. The request can also result in a data retrieval charge for the source if the source +storage class bills for data retrieval. If the copy source is in a different region, the +data transfer is billed to the copy source account. For pricing information, see Amazon S3 +pricing. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to CopyObject: PutObject GetObject @@ -2012,7 +2011,7 @@ Permissions General purpose bucket permissions - The following permissions a in your policies when your DeleteObjects request includes specific headers. s3:DeleteObject - To delete an object from a bucket, you must always specify the s3:DeleteObject permission. s3:DeleteObjectVersion - To delete a specific version of -an object from a versiong-enabled bucket, you must specify the s3:DeleteObjectVersion +an object from a versioning-enabled bucket, you must specify the s3:DeleteObjectVersion permission. 
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission @@ -4150,6 +4149,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"partNumber"`: Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified. Useful querying about the size of the part and the number of parts in this object. +- `"response-cache-control"`: Sets the Cache-Control header of the response. +- `"response-content-disposition"`: Sets the Content-Disposition header of the response. +- `"response-content-encoding"`: Sets the Content-Encoding header of the response. +- `"response-content-language"`: Sets the Content-Language header of the response. +- `"response-content-type"`: Sets the Content-Type header of the response. +- `"response-expires"`: Sets the Expires header of the response. - `"versionId"`: Version ID used to reference a specific version of the object. For directory buckets in this API operation, only the null value of the version ID is supported. @@ -7804,12 +7809,12 @@ bucket, you must have the s3:GetObject permission to read the source object th copied. If the destination bucket is a general purpose bucket, you must have the s3:PutObject permission to write the object copy to the destination bucket. For information about permissions required to use the multipart upload API, see Multipart -Upload and Permissions in the Amazon S3 User Guide. Directory bucket permissions - You -must have permissions in a bucket policy or an IAM identity-based policy based on the +upload API and permissions in the Amazon S3 User Guide. Directory bucket permissions - +You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an UploadPartCopy operation. If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession -permission in the Action element of a policy to read the object . By default, the session -is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the +permission in the Action element of a policy to read the object. By default, the session is +in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket. If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The diff --git a/src/services/sagemaker.jl b/src/services/sagemaker.jl index bf951fe784..91bd67d209 100644 --- a/src/services/sagemaker.jl +++ b/src/services/sagemaker.jl @@ -3532,6 +3532,101 @@ function create_notebook_instance_lifecycle_config( ) end +""" + create_optimization_job(deployment_instance_type, model_source, optimization_configs, optimization_job_name, output_config, role_arn, stopping_condition) + create_optimization_job(deployment_instance_type, model_source, optimization_configs, optimization_job_name, output_config, role_arn, stopping_condition, params::Dict{String,<:Any}) + +Creates a job that optimizes a model for inference performance. 
To create the job, you +provide the location of a source model, and you provide the settings for the optimization +techniques that you want the job to apply. When the job completes successfully, SageMaker +uploads the new optimized model to the output destination that you specify. For more +information about how to use this action, and about the supported optimization techniques, +see Optimize model inference with Amazon SageMaker. + +# Arguments +- `deployment_instance_type`: The type of instance that hosts the optimized model that you + create with the optimization job. +- `model_source`: The location of the source model to optimize with an optimization job. +- `optimization_configs`: Settings for each of the optimization techniques that the job + applies. +- `optimization_job_name`: A custom name for the new optimization job. +- `output_config`: Details for where to store the optimized model that you create with the + optimization job. +- `role_arn`: The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker + to perform tasks on your behalf. During model optimization, Amazon SageMaker needs your + permission to: Read input data from an S3 bucket Write model artifacts to an S3 bucket + Write logs to Amazon CloudWatch Logs Publish metrics to Amazon CloudWatch You grant + permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker, + the caller of this API must have the iam:PassRole permission. For more information, see + Amazon SageMaker Roles. +- `stopping_condition`: + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"OptimizationEnvironment"`: The environment variables to set in the model container. +- `"Tags"`: A list of key-value pairs associated with the optimization job. For more + information, see Tagging Amazon Web Services resources in the Amazon Web Services General + Reference Guide. +- `"VpcConfig"`: A VPC in Amazon VPC that your optimized model has access to. 
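A hedged sketch of starting an optimization job with the helper defined below (the instance type, S3 URIs, role ARN, and the nested payload field names are placeholders inferred from the SageMaker API reference, not values confirmed by this diff):

```julia
using AWS
@service Sagemaker   # module name assumed; wraps src/services/sagemaker.jl

Sagemaker.create_optimization_job(
    "ml.g5.2xlarge",                                               # deployment instance type
    Dict("S3" => Dict("S3Uri" => "s3://my-bucket/source-model/")), # model source (shape assumed)
    [Dict("ModelQuantizationConfig" => Dict("OverrideEnvironment" => Dict{String,Any}()))],
    "my-optimization-job",
    Dict("S3OutputLocation" => "s3://my-bucket/optimized/"),       # output config (shape assumed)
    "arn:aws:iam::111122223333:role/SageMakerExecutionRole",
    Dict("MaxRuntimeInSeconds" => 3600),                           # stopping condition (shape assumed)
)
```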
+""" +function create_optimization_job( + DeploymentInstanceType, + ModelSource, + OptimizationConfigs, + OptimizationJobName, + OutputConfig, + RoleArn, + StoppingCondition; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "CreateOptimizationJob", + Dict{String,Any}( + "DeploymentInstanceType" => DeploymentInstanceType, + "ModelSource" => ModelSource, + "OptimizationConfigs" => OptimizationConfigs, + "OptimizationJobName" => OptimizationJobName, + "OutputConfig" => OutputConfig, + "RoleArn" => RoleArn, + "StoppingCondition" => StoppingCondition, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_optimization_job( + DeploymentInstanceType, + ModelSource, + OptimizationConfigs, + OptimizationJobName, + OutputConfig, + RoleArn, + StoppingCondition, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "CreateOptimizationJob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "DeploymentInstanceType" => DeploymentInstanceType, + "ModelSource" => ModelSource, + "OptimizationConfigs" => OptimizationConfigs, + "OptimizationJobName" => OptimizationJobName, + "OutputConfig" => OutputConfig, + "RoleArn" => RoleArn, + "StoppingCondition" => StoppingCondition, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_pipeline(client_request_token, pipeline_name, role_arn) create_pipeline(client_request_token, pipeline_name, role_arn, params::Dict{String,<:Any}) @@ -5503,7 +5598,8 @@ Delete a hub content reference in order to remove a model from a private hub. # Arguments - `hub_content_name`: The name of the hub content to delete. -- `hub_content_type`: The type of hub content to delete. +- `hub_content_type`: The type of hub content reference to delete. The only supported type + of hub content reference to delete is ModelReference. - `hub_name`: The name of the hub to delete the hub content reference from. """ @@ -6240,6 +6336,45 @@ function delete_notebook_instance_lifecycle_config( ) end +""" + delete_optimization_job(optimization_job_name) + delete_optimization_job(optimization_job_name, params::Dict{String,<:Any}) + +Deletes an optimization job. + +# Arguments +- `optimization_job_name`: The name that you assigned to the optimization job. + +""" +function delete_optimization_job( + OptimizationJobName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "DeleteOptimizationJob", + Dict{String,Any}("OptimizationJobName" => OptimizationJobName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_optimization_job( + OptimizationJobName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "DeleteOptimizationJob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("OptimizationJobName" => OptimizationJobName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_pipeline(client_request_token, pipeline_name) delete_pipeline(client_request_token, pipeline_name, params::Dict{String,<:Any}) @@ -8493,6 +8628,45 @@ function describe_notebook_instance_lifecycle_config( ) end +""" + describe_optimization_job(optimization_job_name) + describe_optimization_job(optimization_job_name, params::Dict{String,<:Any}) + +Provides the properties of the specified optimization job. 
+ +# Arguments +- `optimization_job_name`: The name that you assigned to the optimization job. + +""" +function describe_optimization_job( + OptimizationJobName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "DescribeOptimizationJob", + Dict{String,Any}("OptimizationJobName" => OptimizationJobName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_optimization_job( + OptimizationJobName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "DescribeOptimizationJob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("OptimizationJobName" => OptimizationJobName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_pipeline(pipeline_name) describe_pipeline(pipeline_name, params::Dict{String,<:Any}) @@ -11782,6 +11956,53 @@ function list_notebook_instances( ) end +""" + list_optimization_jobs() + list_optimization_jobs(params::Dict{String,<:Any}) + +Lists the optimization jobs in your account and their properties. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CreationTimeAfter"`: Filters the results to only those optimization jobs that were + created after the specified time. +- `"CreationTimeBefore"`: Filters the results to only those optimization jobs that were + created before the specified time. +- `"LastModifiedTimeAfter"`: Filters the results to only those optimization jobs that were + updated after the specified time. +- `"LastModifiedTimeBefore"`: Filters the results to only those optimization jobs that were + updated before the specified time. +- `"MaxResults"`: The maximum number of optimization jobs to return in the response. The + default is 50. +- `"NameContains"`: Filters the results to only those optimization jobs with a name that + contains the specified string. +- `"NextToken"`: A token that you use to get the next set of results following a truncated + response. If the response to the previous request was truncated, that response provides the + value for this token. +- `"OptimizationContains"`: Filters the results to only those optimization jobs that apply + the specified optimization techniques. You can specify either Quantization or Compilation. +- `"SortBy"`: The field by which to sort the optimization jobs in the response. The default + is CreationTime +- `"SortOrder"`: The sort order for results. The default is Ascending +- `"StatusEquals"`: Filters the results to only those optimization jobs with the specified + status. +""" +function list_optimization_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) + return sagemaker( + "ListOptimizationJobs"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_optimization_jobs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "ListOptimizationJobs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_pipeline_execution_steps() list_pipeline_execution_steps(params::Dict{String,<:Any}) @@ -13663,6 +13884,45 @@ function stop_notebook_instance( ) end +""" + stop_optimization_job(optimization_job_name) + stop_optimization_job(optimization_job_name, params::Dict{String,<:Any}) + +Ends a running inference optimization job. + +# Arguments +- `optimization_job_name`: The name that you assigned to the optimization job. 
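A quick sketch tying together the optimization-job describe, list, stop, and delete helpers added in this diff (the job name is a placeholder; `"NameContains"` is one of the documented `list_optimization_jobs` filters):

```julia
using AWS
@service Sagemaker   # module name assumed

job = Sagemaker.describe_optimization_job("my-optimization-job")
Sagemaker.list_optimization_jobs(Dict("NameContains" => "my-optimization"))
Sagemaker.stop_optimization_job("my-optimization-job")
Sagemaker.delete_optimization_job("my-optimization-job")
```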
+ +""" +function stop_optimization_job( + OptimizationJobName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "StopOptimizationJob", + Dict{String,Any}("OptimizationJobName" => OptimizationJobName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_optimization_job( + OptimizationJobName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "StopOptimizationJob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("OptimizationJobName" => OptimizationJobName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ stop_pipeline_execution(client_request_token, pipeline_execution_arn) stop_pipeline_execution(client_request_token, pipeline_execution_arn, params::Dict{String,<:Any}) diff --git a/src/services/secrets_manager.jl b/src/services/secrets_manager.jl index 90c4910c41..3cf851578a 100644 --- a/src/services/secrets_manager.jl +++ b/src/services/secrets_manager.jl @@ -133,7 +133,10 @@ secretsmanager:TagResource. To add replica Regions, you must also have secretsmanager:ReplicateSecretToRegions. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. To encrypt the secret with a KMS key other than aws/secretsmanager, you need kms:GenerateDataKey and -kms:Decrypt permission to the key. +kms:Decrypt permission to the key. When you enter commands in a command shell, there is a +risk of the command history being accessed or utilities having access to your command +parameters. This is a concern if the command includes the value of a secret. Learn how to +Mitigate the risks of using command-line tools to store Secrets Manager secrets. # Arguments - `name`: The name of the new secret. The secret name can contain ASCII letters, numbers, @@ -725,7 +728,11 @@ log entry when you call this action. Do not include sensitive information in req parameters except SecretBinary, SecretString, or RotationToken because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: secretsmanager:PutSecretValue. For more information, see IAM policy actions -for Secrets Manager and Authentication and access control in Secrets Manager. +for Secrets Manager and Authentication and access control in Secrets Manager. When you +enter commands in a command shell, there is a risk of the command history being accessed or +utilities having access to your command parameters. This is a concern if the command +includes the value of a secret. Learn how to Mitigate the risks of using command-line tools +to store Secrets Manager secrets. # Arguments - `secret_id`: The ARN or name of the secret to add a new version to. For an ARN, we @@ -1215,8 +1222,12 @@ secretsmanager:UpdateSecret. For more information, see IAM policy actions for S Manager and Authentication and access control in Secrets Manager. If you use a customer managed key, you must also have kms:GenerateDataKey, kms:Encrypt, and kms:Decrypt permissions on the key. If you change the KMS key and you don't have kms:Encrypt permission -to the new key, Secrets Manager does not re-ecrypt existing secret versions with the new -key. For more information, see Secret encryption and decryption. +to the new key, Secrets Manager does not re-encrypt existing secret versions with the new +key. For more information, see Secret encryption and decryption. 
When you enter commands +in a command shell, there is a risk of the command history being accessed or utilities +having access to your command parameters. This is a concern if the command includes the +value of a secret. Learn how to Mitigate the risks of using command-line tools to store +Secrets Manager secrets. # Arguments - `secret_id`: The ARN or name of the secret. For an ARN, we recommend that you specify a @@ -1239,13 +1250,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"KmsKeyId"`: The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt new secret versions as well as any existing versions with the staging labels AWSCURRENT, AWSPENDING, or AWSPREVIOUS. If you don't have kms:Encrypt permission to the new - key, Secrets Manager does not re-ecrypt existing secret versions with the new key. For more - information about versions and staging labels, see Concepts: Version. A key alias is always - prefixed by alias/, for example alias/aws/secretsmanager. For more information, see About - aliases. If you set this to an empty string, Secrets Manager uses the Amazon Web Services - managed key aws/secretsmanager. If this key doesn't already exist in your account, then - Secrets Manager creates it for you automatically. All users and roles in the Amazon Web - Services account automatically have access to use aws/secretsmanager. Creating + key, Secrets Manager does not re-encrypt existing secret versions with the new key. For + more information about versions and staging labels, see Concepts: Version. A key alias is + always prefixed by alias/, for example alias/aws/secretsmanager. For more information, see + About aliases. If you set this to an empty string, Secrets Manager uses the Amazon Web + Services managed key aws/secretsmanager. If this key doesn't already exist in your account, + then Secrets Manager creates it for you automatically. All users and roles in the Amazon + Web Services account automatically have access to use aws/secretsmanager. Creating aws/secretsmanager can result in a one-time significant delay in returning the result. You can only use the Amazon Web Services managed key aws/secretsmanager if you call this operation using credentials from the same Amazon Web Services account that owns the secret. diff --git a/src/services/timestream_query.jl b/src/services/timestream_query.jl index 1ed4086d20..ab7191e474 100644 --- a/src/services/timestream_query.jl +++ b/src/services/timestream_query.jl @@ -620,7 +620,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys The maximum value supported for MaxQueryTCU is 1000. To request an increase to this soft limit, contact Amazon Web Services Support. For information about the default quota for maxQueryTCU, see Default quotas. -- `"QueryPricingModel"`: The pricing model for queries in an account. +- `"QueryPricingModel"`: The pricing model for queries in an account. The + QueryPricingModel parameter is used by several Timestream operations; however, the + UpdateAccountSettings API operation doesn't recognize any values other than COMPUTE_UNITS. 
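+
+A minimal sketch of setting the pricing model through this operation (default credentials
+from the environment are assumed):
+
+```julia
+using AWS: @service
+@service Timestream_Query
+
+# UpdateAccountSettings accepts only COMPUTE_UNITS for QueryPricingModel.
+Timestream_Query.update_account_settings(Dict("QueryPricingModel" => "COMPUTE_UNITS"))
+```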
""" function update_account_settings(; aws_config::AbstractAWSConfig=global_aws_config()) return timestream_query( diff --git a/src/services/workspaces.jl b/src/services/workspaces.jl index 2d5b341c5f..82b4035df0 100644 --- a/src/services/workspaces.jl +++ b/src/services/workspaces.jl @@ -756,6 +756,75 @@ function create_workspaces( ) end +""" + create_workspaces_pool(bundle_id, capacity, description, directory_id, pool_name) + create_workspaces_pool(bundle_id, capacity, description, directory_id, pool_name, params::Dict{String,<:Any}) + +Creates a pool of WorkSpaces. + +# Arguments +- `bundle_id`: The identifier of the bundle for the pool. +- `capacity`: The user capacity of the pool. +- `description`: The pool description. +- `directory_id`: The identifier of the directory for the pool. +- `pool_name`: The name of the pool. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ApplicationSettings"`: Indicates the application settings of the pool. +- `"Tags"`: The tags for the pool. +- `"TimeoutSettings"`: Indicates the timeout settings of the pool. +""" +function create_workspaces_pool( + BundleId, + Capacity, + Description, + DirectoryId, + PoolName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "CreateWorkspacesPool", + Dict{String,Any}( + "BundleId" => BundleId, + "Capacity" => Capacity, + "Description" => Description, + "DirectoryId" => DirectoryId, + "PoolName" => PoolName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_workspaces_pool( + BundleId, + Capacity, + Description, + DirectoryId, + PoolName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "CreateWorkspacesPool", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "BundleId" => BundleId, + "Capacity" => Capacity, + "Description" => Description, + "DirectoryId" => DirectoryId, + "PoolName" => PoolName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_account_link_invitation(link_id) delete_account_link_invitation(link_id, params::Dict{String,<:Any}) @@ -1698,6 +1767,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Limit"`: The maximum number of directories to return. - `"NextToken"`: If you received a NextToken from a previous call that was paginated, provide this token to receive the next set of results. +- `"WorkspaceDirectoryNames"`: The names of the WorkSpace directories. """ function describe_workspace_directories(; aws_config::AbstractAWSConfig=global_aws_config()) return workspaces( @@ -1890,6 +1960,73 @@ function describe_workspaces_connection_status( ) end +""" + describe_workspaces_pool_sessions(pool_id) + describe_workspaces_pool_sessions(pool_id, params::Dict{String,<:Any}) + +Retrieves a list that describes the streaming sessions for a specified pool. + +# Arguments +- `pool_id`: The identifier of the pool. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Limit"`: The maximum number of items to return. +- `"NextToken"`: If you received a NextToken from a previous call that was paginated, + provide this token to receive the next set of results. +- `"UserId"`: The identifier of the user. 
+""" +function describe_workspaces_pool_sessions( + PoolId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "DescribeWorkspacesPoolSessions", + Dict{String,Any}("PoolId" => PoolId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_workspaces_pool_sessions( + PoolId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "DescribeWorkspacesPoolSessions", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("PoolId" => PoolId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_workspaces_pools() + describe_workspaces_pools(params::Dict{String,<:Any}) + +Describes the specified WorkSpaces Pools. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: The filter conditions for the WorkSpaces Pool to return. +- `"Limit"`: The maximum number of items to return. +- `"NextToken"`: If you received a NextToken from a previous call that was paginated, + provide this token to receive the next set of results. +- `"PoolIds"`: The identifier of the WorkSpaces Pools. +""" +function describe_workspaces_pools(; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces( + "DescribeWorkspacesPools"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_workspaces_pools( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "DescribeWorkspacesPools", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ disassociate_connection_alias(alias_id) disassociate_connection_alias(alias_id, params::Dict{String,<:Any}) @@ -2501,6 +2638,44 @@ function modify_selfservice_permissions( ) end +""" + modify_streaming_properties(resource_id) + modify_streaming_properties(resource_id, params::Dict{String,<:Any}) + +Modifies the specified streaming properties. + +# Arguments +- `resource_id`: The identifier of the resource. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"StreamingProperties"`: The streaming properties to configure. +""" +function modify_streaming_properties( + ResourceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "ModifyStreamingProperties", + Dict{String,Any}("ResourceId" => ResourceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function modify_streaming_properties( + ResourceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "ModifyStreamingProperties", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceId" => ResourceId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ modify_workspace_access_properties(resource_id, workspace_access_properties) modify_workspace_access_properties(resource_id, workspace_access_properties, params::Dict{String,<:Any}) @@ -2775,29 +2950,28 @@ function rebuild_workspaces( end """ - register_workspace_directory(directory_id, enable_work_docs) - register_workspace_directory(directory_id, enable_work_docs, params::Dict{String,<:Any}) + register_workspace_directory() + register_workspace_directory(params::Dict{String,<:Any}) Registers the specified directory. This operation is asynchronous and returns before the WorkSpace directory is registered. 
If this is the first time you are registering a directory, you will need to create the workspaces_DefaultRole role before you can register a directory. For more information, see Creating the workspaces_DefaultRole Role. -# Arguments -- `directory_id`: The identifier of the directory. You cannot register a directory if it +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ActiveDirectoryConfig"`: The active directory config of the directory. +- `"DirectoryId"`: The identifier of the directory. You cannot register a directory if it does not have a status of Active. If the directory does not have a status of Active, you will receive an InvalidResourceStateException error. If you have already registered the maximum number of directories that you can register with Amazon WorkSpaces, you will receive a ResourceLimitExceededException error. Deregister directories that you are not using for WorkSpaces, and try again. -- `enable_work_docs`: Indicates whether Amazon WorkDocs is enabled or disabled. If you have - enabled this parameter and WorkDocs is not available in the Region, you will receive an - OperationNotSupportedException error. Set EnableWorkDocs to disabled, and try again. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"EnableSelfService"`: Indicates whether self-service capabilities are enabled or disabled. +- `"EnableWorkDocs"`: Indicates whether Amazon WorkDocs is enabled or disabled. If you have + enabled this parameter and WorkDocs is not available in the Region, you will receive an + OperationNotSupportedException error. Set EnableWorkDocs to disabled, and try again. - `"SubnetIds"`: The identifiers of the subnets for your virtual private cloud (VPC). Make sure that the subnets are in supported Availability Zones. The subnets must also be in separate Availability Zones. If these conditions are not met, you will receive an @@ -2808,34 +2982,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Web Services account must be enabled for BYOL. If your account has not been enabled for BYOL, you will receive an InvalidParameterValuesException error. For more information about BYOL images, see Bring Your Own Windows Desktop Images. +- `"UserIdentityType"`: The type of identity management the user is using. +- `"WorkspaceDirectoryDescription"`: Description of the directory to register. +- `"WorkspaceDirectoryName"`: The name of the directory to register. +- `"WorkspaceType"`: Indicates whether the directory's WorkSpace type is personal or pools. 
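+
+With all inputs now passed as optional parameters, a minimal registration sketch looks
+like this (the directory ID is hypothetical and default credentials are assumed):
+
+```julia
+using AWS: @service
+@service WorkSpaces
+
+# Register an existing directory and enable self-service capabilities for its users.
+WorkSpaces.register_workspace_directory(
+    Dict("DirectoryId" => "d-9067d8e1b2", "EnableSelfService" => true)
+)
+```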
""" -function register_workspace_directory( - DirectoryId, EnableWorkDocs; aws_config::AbstractAWSConfig=global_aws_config() -) +function register_workspace_directory(; aws_config::AbstractAWSConfig=global_aws_config()) return workspaces( - "RegisterWorkspaceDirectory", - Dict{String,Any}("DirectoryId" => DirectoryId, "EnableWorkDocs" => EnableWorkDocs); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, + "RegisterWorkspaceDirectory"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET ) end function register_workspace_directory( - DirectoryId, - EnableWorkDocs, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return workspaces( "RegisterWorkspaceDirectory", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "DirectoryId" => DirectoryId, "EnableWorkDocs" => EnableWorkDocs - ), - params, - ), - ); + params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -2993,6 +3155,36 @@ function start_workspaces( ) end +""" + start_workspaces_pool(pool_id) + start_workspaces_pool(pool_id, params::Dict{String,<:Any}) + +Starts the specified pool. You cannot start a pool unless it has a running mode of AutoStop +and a state of STOPPED. + +# Arguments +- `pool_id`: The identifier of the pool. + +""" +function start_workspaces_pool(PoolId; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces( + "StartWorkspacesPool", + Dict{String,Any}("PoolId" => PoolId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_workspaces_pool( + PoolId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "StartWorkspacesPool", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("PoolId" => PoolId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ stop_workspaces(stop_workspace_requests) stop_workspaces(stop_workspace_requests, params::Dict{String,<:Any}) @@ -3033,6 +3225,36 @@ function stop_workspaces( ) end +""" + stop_workspaces_pool(pool_id) + stop_workspaces_pool(pool_id, params::Dict{String,<:Any}) + +Stops the specified pool. You cannot stop a WorkSpace pool unless it has a running mode of +AutoStop and a state of AVAILABLE, IMPAIRED, UNHEALTHY, or ERROR. + +# Arguments +- `pool_id`: The identifier of the pool. + +""" +function stop_workspaces_pool(PoolId; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces( + "StopWorkspacesPool", + Dict{String,Any}("PoolId" => PoolId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_workspaces_pool( + PoolId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "StopWorkspacesPool", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("PoolId" => PoolId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ terminate_workspaces(terminate_workspace_requests) terminate_workspaces(terminate_workspace_requests, params::Dict{String,<:Any}) @@ -3090,6 +3312,72 @@ function terminate_workspaces( ) end +""" + terminate_workspaces_pool(pool_id) + terminate_workspaces_pool(pool_id, params::Dict{String,<:Any}) + +Terminates the specified pool. + +# Arguments +- `pool_id`: The identifier of the pool. 
+ +""" +function terminate_workspaces_pool( + PoolId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "TerminateWorkspacesPool", + Dict{String,Any}("PoolId" => PoolId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function terminate_workspaces_pool( + PoolId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "TerminateWorkspacesPool", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("PoolId" => PoolId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + terminate_workspaces_pool_session(session_id) + terminate_workspaces_pool_session(session_id, params::Dict{String,<:Any}) + +Terminates the pool session. + +# Arguments +- `session_id`: The identifier of the pool session. + +""" +function terminate_workspaces_pool_session( + SessionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "TerminateWorkspacesPoolSession", + Dict{String,Any}("SessionId" => SessionId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function terminate_workspaces_pool_session( + SessionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "TerminateWorkspacesPoolSession", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("SessionId" => SessionId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_connect_client_add_in(add_in_id, resource_id) update_connect_client_add_in(add_in_id, resource_id, params::Dict{String,<:Any}) @@ -3334,3 +3622,40 @@ function update_workspace_image_permission( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_workspaces_pool(pool_id) + update_workspaces_pool(pool_id, params::Dict{String,<:Any}) + +Updates the specified pool. + +# Arguments +- `pool_id`: The identifier of the specified pool to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ApplicationSettings"`: The persistent application settings for users in the pool. +- `"BundleId"`: The identifier of the bundle. +- `"Capacity"`: The desired capacity for the pool. +- `"Description"`: Describes the specified pool to update. +- `"DirectoryId"`: The identifier of the directory. +- `"TimeoutSettings"`: Indicates the timeout settings of the specified pool. +""" +function update_workspaces_pool(PoolId; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces( + "UpdateWorkspacesPool", + Dict{String,Any}("PoolId" => PoolId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_workspaces_pool( + PoolId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "UpdateWorkspacesPool", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("PoolId" => PoolId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end